/*
 * Copyright 2018, Jaroslaw Pelczar <jarek@jpelczar.com>
 * Distributed under the terms of the MIT License.
 */
#ifndef _KERNEL_ARCH_ARM64_ARCH_CPU_H_
#define _KERNEL_ARCH_ARM64_ARCH_CPU_H_


#define CPU_MAX_CACHE_LEVEL 	8
#define CACHE_LINE_SIZE 		64

// TODO: These will require a real implementation when PAN is enabled
#define arch_cpu_enable_user_access()
#define arch_cpu_disable_user_access()
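// A possible shape for these once PAN (ARMv8.1 Privileged Access Never) is
// enabled: toggle PSTATE.PAN around kernel accesses to user memory. This is
// only a sketch of one option, not the final design:
//
//	#define arch_cpu_enable_user_access()	__asm__ __volatile__("msr pan, #0")
//	#define arch_cpu_disable_user_access()	__asm__ __volatile__("msr pan, #1")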

#include <kernel/arch/arm64/arm_registers.h>

#ifndef _ASSEMBLER

#include <arch/arm64/arch_thread_types.h>
#include <kernel.h>

#define arm64_sev()  		__asm__ __volatile__("sev" : : : "memory")
#define arm64_wfe()  		__asm__ __volatile__("wfe" : : : "memory")
// AArch64 requires an explicit barrier option on dsb/dmb; "sy" is the
// conservative full-system default.
#define arm64_dsb()  		__asm__ __volatile__("dsb sy" : : : "memory")
#define arm64_dmb()  		__asm__ __volatile__("dmb sy" : : : "memory")
#define arm64_isb()  		__asm__ __volatile__("isb" : : : "memory")
#define arm64_nop()  		__asm__ __volatile__("nop" : : : "memory")
#define arm64_wfi()  		__asm__ __volatile__("wfi" : : : "memory")
#define arm64_yield()		__asm__ __volatile__("yield" : : : "memory")
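// Example usage (illustrative only; gFlag is a hypothetical variable):
// publishing a flag to cores parked in arm64_wfe():
//
//	gFlag = 1;
//	arm64_dsb();	// make the store observable before the event
//	arm64_sev();	// wake every core waiting in WFE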

/* Extract CPU affinity levels 0-3 */
#define	CPU_AFF0(mpidr)	(u_int)(((mpidr) >> 0) & 0xff)
#define	CPU_AFF1(mpidr)	(u_int)(((mpidr) >> 8) & 0xff)
#define	CPU_AFF2(mpidr)	(u_int)(((mpidr) >> 16) & 0xff)
#define	CPU_AFF3(mpidr)	(u_int)(((mpidr) >> 32) & 0xff)
#define	CPU_AFF0_MASK	0xffUL
#define	CPU_AFF1_MASK	0xff00UL
#define	CPU_AFF2_MASK	0xff0000UL
#define	CPU_AFF3_MASK	0xff00000000UL
#define	CPU_AFF_MASK	(CPU_AFF0_MASK | CPU_AFF1_MASK | \
    CPU_AFF2_MASK | CPU_AFF3_MASK)	/* Mask affinity fields in MPIDR_EL1 */
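// Aff0-Aff2 occupy MPIDR_EL1 bits 23:0; Aff3 sits apart at bits 39:32
// (bits 31:24 hold the MT/U flags and reserved bits), hence the gap in
// CPU_AFF_MASK and the 32-bit shift in CPU_AFF3().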


#define	CPU_IMPL_ARM		0x41
#define	CPU_IMPL_BROADCOM	0x42
#define	CPU_IMPL_CAVIUM		0x43
#define	CPU_IMPL_DEC		0x44
#define	CPU_IMPL_INFINEON	0x49
#define	CPU_IMPL_FREESCALE	0x4D
#define	CPU_IMPL_NVIDIA		0x4E
#define	CPU_IMPL_APM		0x50
#define	CPU_IMPL_QUALCOMM	0x51
#define	CPU_IMPL_MARVELL	0x56
#define	CPU_IMPL_INTEL		0x69
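// The MIDR_EL1 implementer code is the ASCII value of the vendor's
// identifying letter, e.g. 0x41 'A' for Arm, 0x4E 'N' for NVIDIA,
// 0x51 'Q' for Qualcomm.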

#define	CPU_PART_THUNDER	0x0A1
#define	CPU_PART_FOUNDATION	0xD00
#define	CPU_PART_CORTEX_A35	0xD04
#define	CPU_PART_CORTEX_A53	0xD03
#define	CPU_PART_CORTEX_A55	0xD05
#define	CPU_PART_CORTEX_A57	0xD07
#define	CPU_PART_CORTEX_A72	0xD08
#define	CPU_PART_CORTEX_A73	0xD09
#define	CPU_PART_CORTEX_A75	0xD0A

#define	CPU_REV_THUNDER_1_0	0x00
#define	CPU_REV_THUNDER_1_1	0x01

#define	CPU_IMPL(midr)	(((midr) >> 24) & 0xff)
#define	CPU_PART(midr)	(((midr) >> 4) & 0xfff)
#define	CPU_VAR(midr)	(((midr) >> 20) & 0xf)
#define	CPU_REV(midr)	(((midr) >> 0) & 0xf)
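// MIDR_EL1 field layout: implementer in bits 31:24, variant in 23:20,
// architecture in 19:16, part number in 15:4 and revision in 3:0.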

#define	CPU_IMPL_TO_MIDR(val)	(((val) & 0xffU) << 24)
#define	CPU_PART_TO_MIDR(val)	(((val) & 0xfff) << 4)
#define	CPU_VAR_TO_MIDR(val)	(((val) & 0xf) << 20)
#define	CPU_REV_TO_MIDR(val)	(((val) & 0xf) << 0)

// Note: the implementer field shifts into bit 31, so use an unsigned
// constant to avoid signed-overflow issues.
#define	CPU_IMPL_MASK	(0xffU << 24)
#define	CPU_PART_MASK	(0xfff << 4)
#define	CPU_VAR_MASK	(0xf << 20)
#define	CPU_REV_MASK	(0xf << 0)

#define	CPU_ID_RAW(impl, part, var, rev)		\
    (CPU_IMPL_TO_MIDR((impl)) |				\
    CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) |	\
    CPU_REV_TO_MIDR((rev)))

#define	CPU_MATCH(mask, impl, part, var, rev)		\
    (((mask) & PCPU_GET(midr)) ==			\
    ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))

#define	CPU_MATCH_RAW(mask, devid)			\
    (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
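// Example (illustrative only): matching any Cortex-A53 on the current CPU,
// ignoring variant and revision:
//
//	if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK,
//			CPU_IMPL_ARM, CPU_PART_CORTEX_A53, 0, 0))
//		...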

// Note: CNTVCT_EL0 is the generic timer's virtual counter; it ticks at the
// fixed timer frequency (CNTFRQ_EL0), not at the core clock.
static inline uint64
arm64_get_cyclecount(void)
{
	return READ_SPECIALREG(cntvct_el0);
}

#define	ADDRESS_TRANSLATE_FUNC(stage)					\
static inline uint64									\
arm64_address_translate_ ##stage (uint64 addr)			\
{														\
	uint64 ret;											\
														\
	__asm__ __volatile__(								\
	    "at " __ARMREG_STRING(stage) ", %1 \n"			\
	    "mrs %0, par_el1" : "=r"(ret) : "r"(addr));		\
														\
	return (ret);										\
}


ADDRESS_TRANSLATE_FUNC(s1e0r)
ADDRESS_TRANSLATE_FUNC(s1e0w)
ADDRESS_TRANSLATE_FUNC(s1e1r)
ADDRESS_TRANSLATE_FUNC(s1e1w)
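// These helpers run the AT instruction for a stage 1 lookup as seen from
// EL0 or EL1, for a read (r) or write (w) access, and return PAR_EL1:
// bit 0 (PAR_EL1.F) set means the translation aborted; otherwise bits
// 47:12 hold the output physical frame address.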


struct aarch64_fpu_state
{
	uint64 regs[32 * 2];	// V0-V31: 32 SIMD/FP registers of 128 bits each
	uint64 fpsr;
	uint64 fpcr;
};


/* raw exception frames */
struct iframe {
	// return info
	uint64 elr;
	uint64 spsr;
	uint64 x[29];	// x0-x28
	uint64 fp;		// x29
	uint64 lr;		// x30
	uint64 sp;

	// exception info
	uint64 esr;
	uint64 far;

	// fpu
	struct aarch64_fpu_state fpu;
};
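// The field order presumably mirrors the save/restore sequence in the
// exception entry assembly, so the two must be kept in sync.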


#ifdef __cplusplus
namespace BKernel {
	struct Thread;
}  // namespace BKernel


typedef struct arch_cpu_info {
	uint32						mpidr;
	BKernel::Thread*			last_vfp_user;
} arch_cpu_info;
#endif	// __cplusplus


#ifdef __cplusplus
extern "C" {
#endif


static inline void
arch_cpu_pause(void)
{
	// Hint to the core (e.g. an SMT sibling or a hypervisor) that we are
	// in a spin loop.
	arm64_yield();
}


static inline void
arch_cpu_idle(void)
{
	// Sleep until an interrupt (or other wakeup event) arrives.
	arm64_wfi();
}


extern addr_t arm64_get_fp(void);


#ifdef __cplusplus
}
#endif

#endif	/* !_ASSEMBLER */


#endif /* _KERNEL_ARCH_ARM64_ARCH_CPU_H_ */