xref: /haiku/headers/private/system/arch/riscv64/arch_cpu_defs.h (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
1 /*
2  * Copyright 2021, Haiku, Inc. All rights reserved.
3  * Distributed under the terms of the MIT License.
4  */
5 #ifndef _SYSTEM_ARCH_RISCV64_DEFS_H
6 #define _SYSTEM_ARCH_RISCV64_DEFS_H
7 
8 
9 #include <SupportDefs.h>
10 
11 #define B_ALWAYS_INLINE __attribute__((always_inline)) inline
12 
13 
14 #ifdef __cplusplus
15 
// RISC-V privilege levels, as encoded in mstatus.mpp/spp and used as
// offsets for the per-mode interrupt/exception numbers below.
enum {
	modeU = 0, // user
	modeS = 1, // supervisor
	modeM = 3, // machine (2 is reserved by the spec)
};
21 
// Values of the fs/xs fields in mstatus/sstatus: state of the FPU and
// other extension register files. "Dirty" means the registers were
// modified and must be saved on context switch.
enum {
	extStatusOff     = 0,
	extStatusInitial = 1,
	extStatusClean   = 2,
	extStatusDirty   = 3,
};
29 
// Machine-mode status register (mstatus), RV64 layout.
// NOTE: the bitfield declaration order (LSB first) defines the register
// layout and must not be changed.
struct MstatusReg {
	union {
		struct {
			uint64 ie:      4; // interrupt enable, one bit per mode (U/S/-/M)
			uint64 pie:     4; // previous interrupt enable, one bit per mode
			uint64 spp:     1; // previous mode (supervisor)
			uint64 unused1: 2;
			uint64 mpp:     2; // previous mode (machine)
			uint64 fs:      2; // FPU status (extStatus* values)
			uint64 xs:      2; // extensions status (extStatus* values)
			uint64 mprv:    1; // modify privilege
			uint64 sum:     1; // permit supervisor user memory access
			uint64 mxr:     1; // make executable readable
			uint64 tvm:     1; // trap virtual memory
			uint64 tw:      1; // timeout wait (trap WFI)
			uint64 tsr:     1; // trap SRET
			uint64 unused2: 9;
			uint64 uxl:     2; // U-mode XLEN
			uint64 sxl:     2; // S-mode XLEN
			uint64 unused3: 27;
			uint64 sd:      1; // status dirty (summary of fs/xs)
		};
		uint64 val; // raw register value
	};

	MstatusReg() {}
	MstatusReg(uint64 val): val(val) {}
};
58 
// Supervisor-mode status register (sstatus): the supervisor-visible
// subset of mstatus, with the M-mode-only fields masked out as unused.
// NOTE: the bitfield declaration order (LSB first) defines the register
// layout and must not be changed.
struct SstatusReg {
	union {
		struct {
			uint64 ie:      2; // interrupt enable (U, S bits)
			uint64 unused1: 2;
			uint64 pie:     2; // previous interrupt enable (U, S bits)
			uint64 unused2: 2;
			uint64 spp:     1; // previous mode (supervisor)
			uint64 unused3: 4;
			uint64 fs:      2; // FPU status (extStatus* values)
			uint64 xs:      2; // extensions status (extStatus* values)
			uint64 unused4: 1;
			uint64 sum:     1; // permit supervisor user memory access
			uint64 mxr:     1; // make executable readable
			uint64 unused5: 12;
			uint64 uxl:     2; // U-mode XLEN
			uint64 unused6: 29;
			uint64 sd:      1; // status dirty (summary of fs/xs)
		};
		uint64 val; // raw register value
	};

	SstatusReg() {}
	SstatusReg(uint64 val): val(val) {}
};
84 
// Interrupt numbers: bit positions in mip/mie/sip/sie and the value of
// the cause register when causeInterrupt is set. Each base number is
// offset by the target privilege mode (modeU/modeS/modeM).
enum {
	softInt    = 0, // software interrupt base
	uSoftInt   = softInt + modeU,
	sSoftInt   = softInt + modeS,
	mSoftInt   = softInt + modeM,
	timerInt   = 4, // timer interrupt base
	uTimerInt  = timerInt + modeU,
	sTimerInt  = timerInt + modeS,
	mTimerInt  = timerInt + modeM,
	externInt  = 8, // external (PLIC) interrupt base
	uExternInt = externInt + modeU,
	sExternInt = externInt + modeS,
	mExternInt = externInt + modeM,
};
99 
// Trap cause values (mcause/scause). The top bit distinguishes
// interrupts from exceptions; the low bits hold the exception or
// interrupt number.
enum {
	causeInterrupt        = 1ULL << 63, // rest bits are interrupt number
	causeExecMisalign     = 0,
	causeExecAccessFault  = 1,
	causeIllegalInst      = 2,
	causeBreakpoint       = 3,
	causeLoadMisalign     = 4,
	causeLoadAccessFault  = 5,
	causeStoreMisalign    = 6,
	causeStoreAccessFault = 7,
	causeECall            = 8, // environment call base, offset by caller mode
	causeUEcall           = causeECall + modeU,
	causeSEcall           = causeECall + modeS,
	causeMEcall           = causeECall + modeM,
	causeExecPageFault    = 12,
	causeLoadPageFault    = 13,
	causeStorePageFault   = 15, // 14 is reserved by the spec
};
118 
// physical memory protection: permission bit positions (not masks) in a
// pmpcfg entry
enum {
	pmpR = 0, // readable
	pmpW = 1, // writable
	pmpX = 2, // executable
};
125 
enum {
	// naturally aligned power of two: address-matching mode (A field,
	// value 3) pre-shifted to its position in a pmpcfg entry
	pmpMatchNapot = 3 << 3,
};
130 
// Paging parameters shared by the Sv39/Sv48/Sv57 translation modes.
enum {
	pageSize = 4096, // bytes per page
	pageBits = 12, // log2(pageSize), width of the in-page offset
	pteCount = 512, // entries per page table
	pteIdxBits = 9, // log2(pteCount), index bits consumed per level
};
137 
// Page table entry flag bit positions (not masks) within Pte::flags.
enum {
	pteValid    = 0,
	pteRead     = 1,
	pteWrite    = 2,
	pteExec     = 3,
	pteUser     = 4, // accessible in U-mode
	pteGlobal   = 5,
	pteAccessed = 6,
	pteDirty    = 7,
};
148 
// RV64 page table entry.
// NOTE: the bitfield declaration order (LSB first) defines the entry
// layout and must not be changed.
struct Pte {
	union {
		struct {
			uint64 flags:     8; // pte* flag bits above
			uint64 rsw:       2; // reserved for software use
			uint64 ppn:      44; // physical page number
			uint64 reserved: 10;
		};
		uint64 val; // raw entry value
	};

	Pte() {}
	Pte(uint64 val): val(val) {}
};
163 
// Values of the satp mode field: address translation scheme in use.
enum {
	satpModeBare =  0, // no translation
	satpModeSv39 =  8, // 3-level, 39-bit virtual addresses
	satpModeSv48 =  9, // 4-level, 48-bit virtual addresses
	satpModeSv57 = 10,
	satpModeSv64 = 11,
};
171 
// Supervisor address translation and protection register (satp).
// NOTE: the bitfield declaration order (LSB first) defines the register
// layout and must not be changed.
struct SatpReg {
	union {
		struct {
			uint64 ppn:  44; // physical page number of the root page table
			uint64 asid: 16; // address space ID
			uint64 mode:  4; // satpMode* value
		};
		uint64 val; // raw register value
	};

	SatpReg() {}
	SatpReg(uint64 val): val(val) {}
};
185 
186 static B_ALWAYS_INLINE uint64 VirtAdrPte(uint64 physAdr, uint32 level)
187 {
188 	return (physAdr >> (pageBits + pteIdxBits*level)) % (1 << pteIdxBits);
189 }
190 
191 static B_ALWAYS_INLINE uint64 VirtAdrOfs(uint64 physAdr)
192 {
193 	return physAdr % pageSize;
194 }
195 
// CPU core ID
// Hart (hardware thread) ID of the executing core; M-mode only CSR.
static B_ALWAYS_INLINE uint64 Mhartid() {
	uint64 x; asm volatile("csrr %0, mhartid" : "=r" (x)); return x;}
199 
// status register (see MstatusReg/SstatusReg for the field layout)
static B_ALWAYS_INLINE uint64 Mstatus() {
	uint64 x; asm volatile("csrr %0, mstatus" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMstatus(uint64 x) {
	asm volatile("csrw mstatus, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Sstatus() {
	uint64 x; asm volatile("csrr %0, sstatus" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSstatus(uint64 x) {
	asm volatile("csrw sstatus, %0" : : "r" (x));}
209 
// exception program counter (address the trap returns to on mret/sret)
static B_ALWAYS_INLINE uint64 Mepc() {
	uint64 x; asm volatile("csrr %0, mepc" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMepc(uint64 x) {
	asm volatile("csrw mepc, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Sepc() {
	uint64 x; asm volatile("csrr %0, sepc" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSepc(uint64 x) {
	asm volatile("csrw sepc, %0" : : "r" (x));}
219 
// interrupt pending (one bit per *Int number above)
static B_ALWAYS_INLINE uint64 Mip() {
	uint64 x; asm volatile("csrr %0, mip" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMip(uint64 x) {
	asm volatile("csrw mip, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Sip() {
	uint64 x; asm volatile("csrr %0, sip" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSip(uint64 x) {
	asm volatile("csrw sip, %0" : : "r" (x));}
229 
// interrupt enable (one bit per *Int number above)
static B_ALWAYS_INLINE uint64 Sie() {
	uint64 x; asm volatile("csrr %0, sie" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSie(uint64 x) {
	asm volatile("csrw sie, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Mie() {
	uint64 x; asm volatile("csrr %0, mie" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMie(uint64 x) {
	asm volatile("csrw mie, %0" : : "r" (x));}
239 
// exception delegation: a set bit routes the corresponding exception
// from M-mode to S-mode
static B_ALWAYS_INLINE uint64 Medeleg() {
	uint64 x; asm volatile("csrr %0, medeleg" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMedeleg(uint64 x) {
	asm volatile("csrw medeleg, %0" : : "r" (x));}
// interrupt delegation: same, for interrupts
static B_ALWAYS_INLINE uint64 Mideleg() {
	uint64 x; asm volatile("csrr %0, mideleg" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMideleg(uint64 x) {
	asm volatile("csrw mideleg, %0" : : "r" (x));}
250 
// trap vector: handler entry address, 2 low bits select vectoring mode
static B_ALWAYS_INLINE uint64 Mtvec() {
	uint64 x; asm volatile("csrr %0, mtvec" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMtvec(uint64 x) {
	asm volatile("csrw mtvec, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Stvec() {
	uint64 x; asm volatile("csrr %0, stvec" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetStvec(uint64 x) {
	asm volatile("csrw stvec, %0" : : "r" (x));}
260 
// address translation and protection (pointer to page table and flags,
// see SatpReg). The write carries a "memory" clobber: switching the
// page table invalidates the compiler's assumptions about memory.
static B_ALWAYS_INLINE uint64 Satp() {
	uint64 x; asm volatile("csrr %0, satp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSatp(uint64 x) {
	asm volatile("csrw satp, %0" : : "r" (x) : "memory");}
266 
// scratch register (free CSR, typically holds a per-CPU data pointer
// for trap handlers)
static B_ALWAYS_INLINE uint64 Mscratch() {
	uint64 x; asm volatile("csrr %0, mscratch" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMscratch(uint64 x) {
	asm volatile("csrw mscratch, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Sscratch() {
	uint64 x; asm volatile("csrr %0, sscratch" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSscratch(uint64 x) {
	asm volatile("csrw sscratch, %0" : : "r" (x));}
276 
// trap cause (cause* values above)
static B_ALWAYS_INLINE uint64 Mcause() {
	uint64 x; asm volatile("csrr %0, mcause" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMcause(uint64 x) {
	asm volatile("csrw mcause, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Scause() {
	uint64 x; asm volatile("csrr %0, scause" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetScause(uint64 x) {
	asm volatile("csrw scause, %0" : : "r" (x));}
286 
// trap value: exception-specific information (e.g. the faulting address
// for page faults)
static B_ALWAYS_INLINE uint64 Mtval() {
	uint64 x; asm volatile("csrr %0, mtval" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMtval(uint64 x) {
	asm volatile("csrw mtval, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Stval() {
	uint64 x; asm volatile("csrr %0, stval" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetStval(uint64 x) {
	asm volatile("csrw stval, %0" : : "r" (x));}
296 
// machine-mode counter enable: which counters (cycle, time, ...) are
// readable from lower privilege modes
static B_ALWAYS_INLINE uint64 Mcounteren() {
	uint64 x; asm volatile("csrr %0, mcounteren" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetMcounteren(uint64 x) {
	asm volatile("csrw mcounteren, %0" : : "r" (x));}
302 
// cycle counter (mcycle: M-mode view; cycle: read-only shadow,
// available to lower modes when enabled via mcounteren)
static B_ALWAYS_INLINE uint64 CpuMcycle() {
	uint64 x; asm volatile("csrr %0, mcycle" : "=r" (x)); return x;}
static B_ALWAYS_INLINE uint64 CpuCycle() {
	uint64 x; asm volatile("csrr %0, cycle" : "=r" (x)); return x;}
// monotonic timer
static B_ALWAYS_INLINE uint64 CpuTime() {
	uint64 x; asm volatile("csrr %0, time" : "=r" (x)); return x;}
311 
// physical memory protection: first address/config entry
// (pmpaddr0 holds the address match; pmpcfg0 packs permission and
// matching-mode bits, see pmp*/pmpMatchNapot above)
static B_ALWAYS_INLINE uint64 Pmpaddr0() {
	uint64 x; asm volatile("csrr %0, pmpaddr0" : "=r" (x)); return x;}
static B_ALWAYS_INLINE uint64 Pmpcfg0() {
	uint64 x; asm volatile("csrr %0, pmpcfg0" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetPmpaddr0(uint64 x) {
	asm volatile("csrw pmpaddr0, %0" : : "r" (x));}
static B_ALWAYS_INLINE void SetPmpcfg0(uint64 x) {
	asm volatile("csrw pmpcfg0, %0" : : "r" (x));}
321 
// flush the TLB (sfence.vma; rs1 = virtual address, rs2 = ASID,
// x0 for either means "all")
static B_ALWAYS_INLINE void FlushTlbAll() {
	asm volatile("sfence.vma" : : : "memory");}
static B_ALWAYS_INLINE void FlushTlbPage(uint64 x) {
	asm volatile("sfence.vma %0" : : "r" (x) : "memory");}
static B_ALWAYS_INLINE void FlushTlbAllAsid(uint64 asid) {
	asm volatile("sfence.vma x0, %0" : : "r" (asid) : "memory");}
329 static B_ALWAYS_INLINE void FlushTlbPageAsid(uint64 page, uint64 asid) {
330 	asm volatile("sfence.vma %0, %0" : : "r" (page), "r" (asid) : "memory");}
331 
// flush instruction cache (synchronize instruction fetch with prior
// stores, e.g. after writing code)
static B_ALWAYS_INLINE void FenceI() {
	asm volatile("fence.i" : : : "memory");}
335 
// Raw access to the stack pointer, frame pointer, thread pointer and
// return address registers. The setters overwrite a live register — use
// only in low-level context-switch/bootstrap code.
static B_ALWAYS_INLINE uint64 Sp() {
	uint64 x; asm volatile("mv %0, sp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetSp(uint64 x) {
	asm volatile("mv sp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Fp() {
	uint64 x; asm volatile("mv %0, fp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetFp(uint64 x) {
	asm volatile("mv fp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Tp() {
	uint64 x; asm volatile("mv %0, tp" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetTp(uint64 x) {
	asm volatile("mv tp, %0" : : "r" (x));}
static B_ALWAYS_INLINE uint64 Ra() {
	uint64 x; asm volatile("mv %0, ra" : "=r" (x)); return x;}
static B_ALWAYS_INLINE void SetRa(uint64 x) {
	asm volatile("mv ra, %0" : : "r" (x));}
352 
// Environment call: trap into the next-higher privilege mode (SBI/syscall).
static B_ALWAYS_INLINE void Ecall() {asm volatile("ecall");}

// Wait for interrupts, reduce CPU load when inactive.
static B_ALWAYS_INLINE void Wfi() {asm volatile("wfi");}

// Return from an M-mode / S-mode trap handler (restores mode and pc).
static B_ALWAYS_INLINE void Mret() {asm volatile("mret");}
static B_ALWAYS_INLINE void Sret() {asm volatile("sret");}
360 
361 #endif // __cplusplus
362 
363 
364 #define SPINLOCK_PAUSE()	do {} while (false)
365 
366 
367 #endif	/* _SYSTEM_ARCH_RISCV64_DEFS_H */
368 
369