xref: /haiku/headers/private/system/arch/riscv64/arch_cpu_defs.h (revision 52c4471a3024d2eb81fe88e2c3982b9f8daa5e56)
1 /*
2  * Copyright 2021, Haiku, Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  */
5 #ifndef _SYSTEM_ARCH_RISCV64_DEFS_H
6 #define _SYSTEM_ARCH_RISCV64_DEFS_H
7 
8 
9 #include <SupportDefs.h>
10 
11 #define B_ALWAYS_INLINE __attribute__((always_inline)) inline
12 
13 
14 #ifdef __cplusplus
15 
// RISC-V privilege levels, as encoded in the mstatus.mpp/spp fields and
// used as offsets for the per-mode interrupt/exception numbers below.
enum {
	modeU = 0,
	modeS = 1,
	modeM = 3,
};

// Values of the mstatus/sstatus `fs` and `xs` fields (FPU and other
// extension unit state); used to decide whether a unit's registers must
// be saved on context switch (only `extStatusDirty` requires a save).
enum {
	extStatusOff     = 0,
	extStatusInitial = 1,
	extStatusClean   = 2,
	extStatusDirty   = 3,
};
29 
// Machine-mode status register (mstatus), RV64 layout.
// Bit fields are declared from the least significant bit upwards; the
// bit positions are noted on each field.
union MstatusReg {
	struct {
		uint64 ie:      4; // 0-3: interrupt enable, one bit per mode
		uint64 pie:     4; // 4-7: previous interrupt enable, per mode
		uint64 spp:     1; // 8: previous mode (supervisor)
		uint64 unused1: 2; // 9-10
		uint64 mpp:     2; // 11-12: previous mode (machine)
		uint64 fs:      2; // 13-14: FPU status (extStatus* values)
		uint64 xs:      2; // 15-16: extensions status (extStatus* values)
		uint64 mprv:    1; // 17: modify privilege
		uint64 sum:     1; // 18: permit supervisor user memory access
		uint64 mxr:     1; // 19: make executable readable
		uint64 tvm:     1; // 20: trap virtual memory
		uint64 tw:      1; // 21: timeout wait (trap WFI)
		uint64 tsr:     1; // 22: trap SRET
		uint64 unused2: 9; // 23-31
		uint64 uxl:     2; // 32-33: U-mode XLEN
		uint64 sxl:     2; // 34-35: S-mode XLEN
		uint64 unused3: 27; // 36-62
		uint64 sd:      1; // 63: status dirty (fs/xs summary bit)
	};
	uint64 val;
};
53 
// Supervisor-mode status register (sstatus), RV64 layout: a restricted
// view of mstatus, so the shared fields sit at the same bit positions.
union SstatusReg {
	struct {
		uint64 ie:      2; // 0-1: interrupt enable (U, S)
		uint64 unused1: 2; // 2-3
		uint64 pie:     2; // 4-5: previous interrupt enable (U, S)
		uint64 unused2: 2; // 6-7
		uint64 spp:     1; // 8: previous mode (supervisor)
		uint64 unused3: 4; // 9-12
		uint64 fs:      2; // 13-14: FPU status (extStatus* values)
		uint64 xs:      2; // 15-16: extensions status (extStatus* values)
		uint64 unused4: 1; // 17
		uint64 sum:     1; // 18: permit supervisor user memory access
		uint64 mxr:     1; // 19: make executable readable
		uint64 unused5: 12; // 20-31
		uint64 uxl:     2; // 32-33: U-mode XLEN
		uint64 unused6: 29; // 34-62
		uint64 sd:      1; // 63: status dirty (fs/xs summary bit)
	};
	uint64 val;
};
74 
// Interrupt numbers as used in the mip/mie/sip/sie CSRs and in the low
// bits of mcause/scause when causeInterrupt is set. Each interrupt class
// has per-mode variants at base number + privilege level (mode* above).
enum {
	softInt    = 0, // software interrupt
	uSoftInt   = softInt + modeU,
	sSoftInt   = softInt + modeS,
	mSoftInt   = softInt + modeM,
	timerInt   = 4, // timer interrupt
	uTimerInt  = timerInt + modeU,
	sTimerInt  = timerInt + modeS,
	mTimerInt  = timerInt + modeM,
	externInt  = 8, // external (platform/PLIC) interrupt
	uExternInt = externInt + modeU,
	sExternInt = externInt + modeS,
	mExternInt = externInt + modeM,
};
89 
// Trap cause codes for the mcause/scause CSRs. If the top bit
// (causeInterrupt) is set, the remaining bits hold an interrupt number
// from the enum above; otherwise they hold one of the exception codes
// below.
enum {
	causeInterrupt        = 1ULL << 63, // rest bits are interrupt number
	causeExecMisalign     = 0,
	causeExecAccessFault  = 1,
	causeIllegalInst      = 2,
	causeBreakpoint       = 3,
	causeLoadMisalign     = 4,
	causeLoadAccessFault  = 5,
	causeStoreMisalign    = 6,
	causeStoreAccessFault = 7,
	causeECall            = 8, // base: environment call from U-mode
	causeUEcall           = causeECall + modeU,
	causeSEcall           = causeECall + modeS,
	causeMEcall           = causeECall + modeM,
	causeExecPageFault    = 12,
	causeLoadPageFault    = 13,
	causeStorePageFault   = 15,
};
108 
// physical memory protection
// Bit numbers of the permission flags within a pmpcfg entry byte
// (use as `1 << pmpR` etc.).
enum {
	pmpR = 0, // readable
	pmpW = 1, // writable
	pmpX = 2, // executable
};

enum {
	// naturally aligned power of two: address-matching mode 3 (NAPOT),
	// already shifted into the A field (bits 3-4) of a pmpcfg entry
	pmpMatchNapot = 3 << 3,
};
120 
// Paging geometry (Sv39/Sv48 style page tables).
enum {
	pageBits = 12,  // log2 of the page size (4 KiB pages)
	pteCount = 512, // page table entries per table (one page worth)
	pteIdxBits = 9, // log2(pteCount): virtual address bits per level
};

// Bit numbers of the flags in the low byte of a page table entry
// (the Pte::flags field below; use as `1 << pteValid` etc.).
enum {
	pteValid    = 0,
	pteRead     = 1,
	pteWrite    = 2,
	pteExec     = 3,
	pteUser     = 4,
	pteGlobal   = 5,
	pteAccessed = 6,
	pteDirty    = 7,
};
137 
// RV64 page table entry.
union Pte {
	struct {
		uint64 flags:     8; // bits 0-7: pte* flag bits above
		uint64 rsw:       2; // bits 8-9: reserved for software use
		uint64 ppn:      44; // bits 10-53: physical page number
		uint64 reserved: 10; // bits 54-63
	};
	uint64 val;
};
147 
// Values of the satp `mode` field: selects the address translation
// scheme (bare = no translation; SvNN = NN-bit virtual addresses).
enum {
	satpModeBare =  0,
	satpModeSv39 =  8,
	satpModeSv48 =  9,
	satpModeSv57 = 10,
	satpModeSv64 = 11,
};

// Supervisor address translation and protection register (satp).
union SatpReg {
	struct {
		uint64 ppn:  44; // physical page number of the root page table
		uint64 asid: 16; // address space ID
		uint64 mode:  4; // translation scheme, satpMode* above
	};
	uint64 val;
};
164 
165 static B_ALWAYS_INLINE uint64 VirtAdrPte(uint64 physAdr, uint32 level)
166 {
167 	return (physAdr >> (pageBits + pteIdxBits*level)) % (1 << pteIdxBits);
168 }
169 
// Return the byte offset of an address within its page.
// NOTE(review): PAGESIZE is not defined in this header — it is expected
// to come from another system header; confirm it matches 1 << pageBits.
// The parameter is named physAdr although the function name says
// "VirtAdr"; the arithmetic is address-space agnostic.
static B_ALWAYS_INLINE uint64 VirtAdrOfs(uint64 physAdr)
{
	return physAdr % PAGESIZE;
}
174 
// Defines a family of always-inline accessors for the CSR `value`:
//   Name()                   - read the CSR (csrr)
//   SetName(x)               - write the CSR (csrw)
//   SetBitsName(x)           - set the bits given in x (csrs)
//   ClearBitsName(x)         - clear the bits given in x (csrc)
//   GetAndSetBitsName(x)     - read old value, then set bits (csrrs)
//   GetAndClearBitsName(x)   - read old value, then clear bits (csrrc)
// (No comments inside the macro body: `//` would swallow the trailing
// line-continuation backslashes.)
#define CSR_REG_MACRO(Name, value) \
	static B_ALWAYS_INLINE uint64 Name() { \
		uint64 x; asm volatile("csrr %0, " #value : "=r" (x)); return x;} \
	static B_ALWAYS_INLINE void Set##Name(uint64 x) { \
		asm volatile("csrw " #value ", %0" : : "r" (x));} \
	static B_ALWAYS_INLINE void SetBits##Name(uint64 x) { \
		asm volatile("csrs " #value ", %0" : : "r" (x));} \
	static B_ALWAYS_INLINE void ClearBits##Name(uint64 x) { \
		asm volatile("csrc " #value ", %0" : : "r" (x));} \
	static B_ALWAYS_INLINE uint64 GetAndSetBits##Name(uint64 x) { \
		uint64 res; \
		asm volatile("csrrs %0, " #value ", %1" : "=r" (res) : "r" (x)); \
		return res; \
	} \
	static B_ALWAYS_INLINE uint64 GetAndClearBits##Name(uint64 x) { \
		uint64 res; \
		asm volatile("csrrc %0, " #value ", %1" : "=r" (res) : "r" (x)); \
		return res; \
	} \

194 
// CPU core ID (hartid = hardware thread ID)
CSR_REG_MACRO(Mhartid, mhartid)

// status register (interrupt enable, previous mode, FPU state, ...)
CSR_REG_MACRO(Mstatus, mstatus)
CSR_REG_MACRO(Sstatus, sstatus)

// exception program counter (instruction address at trap entry)
CSR_REG_MACRO(Mepc, mepc)
CSR_REG_MACRO(Sepc, sepc)

// interrupt pending
CSR_REG_MACRO(Mip, mip)
CSR_REG_MACRO(Sip, sip)

// interrupt enable
CSR_REG_MACRO(Mie, mie)
CSR_REG_MACRO(Sie, sie)

// exception delegation (route exceptions from M-mode to S-mode)
CSR_REG_MACRO(Medeleg, medeleg)
// interrupt delegation (route interrupts from M-mode to S-mode)
CSR_REG_MACRO(Mideleg, mideleg)

// trap vector, 2 low bits: mode (0: direct, 1: vectored)
CSR_REG_MACRO(Mtvec, mtvec)
CSR_REG_MACRO(Stvec, stvec)

// address translation and protection (pointer to page table and flags)
CSR_REG_MACRO(Satp, satp)

// scratch register (free for use by the trap handler)
CSR_REG_MACRO(Mscratch, mscratch)
CSR_REG_MACRO(Sscratch, sscratch)

// trap cause (cause* values above)
CSR_REG_MACRO(Mcause, mcause)
CSR_REG_MACRO(Scause, scause)

// trap value (e.g. faulting address on a page fault)
CSR_REG_MACRO(Mtval, mtval)
CSR_REG_MACRO(Stval, stval)

// machine-mode counter enable (which counters lower modes may read)
CSR_REG_MACRO(Mcounteren, mcounteren)

// cycle counter
CSR_REG_MACRO(CpuMcycle, mcycle)
CSR_REG_MACRO(CpuCycle, cycle)
// monotonic timer
CSR_REG_MACRO(CpuTime, time)

// physical memory protection
CSR_REG_MACRO(Pmpaddr0, pmpaddr0)
CSR_REG_MACRO(Pmpcfg0, pmpcfg0)
250 
// flush the TLB
// sfence.vma rs1, rs2: rs1 selects a virtual address (x0 = all pages),
// rs2 selects an address space ID (x0 = all ASIDs).

// Flush all TLB entries for all address spaces.
static B_ALWAYS_INLINE void FlushTlbAll() {
	asm volatile("sfence.vma" : : : "memory");}
// Flush the TLB entries for one virtual page in all address spaces.
static B_ALWAYS_INLINE void FlushTlbPage(uint64 x) {
	asm volatile("sfence.vma %0" : : "r" (x) : "memory");}
// Flush all TLB entries belonging to one address space.
static B_ALWAYS_INLINE void FlushTlbAllAsid(uint64 asid) {
	asm volatile("sfence.vma x0, %0" : : "r" (asid) : "memory");}
258 static B_ALWAYS_INLINE void FlushTlbPageAsid(uint64 page, uint64 asid) {
259 	asm volatile("sfence.vma %0, %0" : : "r" (page), "r" (asid) : "memory");}
260 
// flush instruction cache
// Synchronizes the instruction stream on this hart with preceding
// stores; required before executing newly written code.
static B_ALWAYS_INLINE void FenceI() {
	asm volatile("fence.i" : : : "memory");}
264 
// Direct access to general purpose registers with a fixed ABI role:
// stack pointer (sp), frame pointer (fp), thread pointer (tp) and
// return address (ra). Mainly for low-level context setup and debugging;
// the setters side-step the compiler's own register bookkeeping, so use
// with care.
static B_ALWAYS_INLINE uint64 Sp() {
	uint64 x; asm volatile("mv %0, sp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSp(uint64 x) {
	asm volatile("mv sp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Fp() {
	uint64 x; asm volatile("mv %0, fp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetFp(uint64 x) {
	asm volatile("mv fp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Tp() {
	uint64 x; asm volatile("mv %0, tp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetTp(uint64 x) {
	asm volatile("mv tp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Ra() {
	uint64 x; asm volatile("mv %0, ra" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetRa(uint64 x) {
	asm volatile("mv ra, %0" : : "r" (x));}
281 
// Environment call: trap into the next higher privilege level
// (the system call / SBI call instruction).
static B_ALWAYS_INLINE void Ecall() {asm volatile("ecall");}

// Wait for interrupts, reduce CPU load when inactive.
static B_ALWAYS_INLINE void Wfi() {asm volatile("wfi");}

// Return from a machine-mode (mret) or supervisor-mode (sret) trap
// handler, restoring the previous privilege mode and interrupt state.
static B_ALWAYS_INLINE void Mret() {asm volatile("mret");}
static B_ALWAYS_INLINE void Sret() {asm volatile("sret");}
289 
290 #endif // __cplusplus
291 
292 
// Busy-wait hint used inside spinlock loops: a no-op on RISC-V.
// NOTE(review): the Zihintpause "pause" hint instruction could be
// emitted here on toolchains that support it — confirm availability.
#define SPINLOCK_PAUSE()	do {} while (false)
294 
295 
296 #endif	/* _SYSTEM_ARCH_RISCV64_DEFS_H */
297 
298