xref: /haiku/headers/private/kernel/arch/arm64/arch_cpu.h (revision 4a55cc230cf7566cadcbb23b1928eefff8aea9a2)
1 /*
2  * Copyright 2018, Jaroslaw Pelczar <jarek@jpelczar.com>
3  * Distributed under the terms of the MIT License.
4  */
5 #ifndef _KERNEL_ARCH_ARM64_ARCH_CPU_H_
6 #define _KERNEL_ARCH_ARM64_ARCH_CPU_H_
7 
8 
// Upper bound on the number of cache levels the kernel will enumerate.
#define CPU_MAX_CACHE_LEVEL 	8
// Assumed cache line size in bytes.
// NOTE(review): the real line size should come from CTR_EL0 at runtime —
// confirm this constant is only used as a conservative alignment default.
#define CACHE_LINE_SIZE 		64

// No-ops on arm64; presumably the analogues of x86 SMAP stac/clac so that
// shared kernel code can call them unconditionally — TODO confirm callers.
#define set_ac()
#define clear_ac()
14 
15 #include <kernel/arch/arm64/arm_registers.h>
16 
17 #ifndef _ASSEMBLER
18 
19 #include <arch/arm64/arch_thread_types.h>
20 #include <kernel.h>
21 
/* Wrappers for the ARMv8 hint and memory-barrier instructions. */
#define arm64_sev()  		__asm__ __volatile__("sev" : : : "memory")
#define arm64_wfe()  		__asm__ __volatile__("wfe" : : : "memory")
// Unlike A32, the A64 encodings of DSB/DMB require an explicit barrier-domain
// operand ("dsb" alone does not assemble). Use the full-system domain "sy",
// the conservative equivalent of a bare barrier.
#define arm64_dsb()  		__asm__ __volatile__("dsb sy" : : : "memory")
#define arm64_dmb()  		__asm__ __volatile__("dmb sy" : : : "memory")
#define arm64_isb()  		__asm__ __volatile__("isb" : : : "memory")
#define arm64_nop()  		__asm__ __volatile__("nop" : : : "memory")
#define arm64_yield() 	__asm__ __volatile__("yield" : : : "memory")
29 
/* Extract CPU affinity levels 0-3 */
// MPIDR_EL1 layout: Aff0 = bits [7:0], Aff1 = [15:8], Aff2 = [23:16],
// Aff3 = [39:32]. Note the gap: bits [31:24] hold flag bits (MT, U, ...),
// not affinity, which is why Aff3 is shifted by 32 rather than 24.
#define	CPU_AFF0(mpidr)	(u_int)(((mpidr) >> 0) & 0xff)
#define	CPU_AFF1(mpidr)	(u_int)(((mpidr) >> 8) & 0xff)
#define	CPU_AFF2(mpidr)	(u_int)(((mpidr) >> 16) & 0xff)
#define	CPU_AFF3(mpidr)	(u_int)(((mpidr) >> 32) & 0xff)
#define	CPU_AFF0_MASK	0xffUL
#define	CPU_AFF1_MASK	0xff00UL
#define	CPU_AFF2_MASK	0xff0000UL
#define	CPU_AFF3_MASK	0xff00000000UL
#define	CPU_AFF_MASK	(CPU_AFF0_MASK | CPU_AFF1_MASK | \
    CPU_AFF2_MASK| CPU_AFF3_MASK)	/* Mask affinity fields in MPIDR_EL1 */
41 
42 
// MIDR_EL1 "Implementer" codes. These are the ASCII codes of the vendor's
// identifying letter (e.g. 'A' == 0x41 for Arm Ltd.).
#define	CPU_IMPL_ARM		0x41
#define	CPU_IMPL_BROADCOM	0x42
#define	CPU_IMPL_CAVIUM		0x43
#define	CPU_IMPL_DEC		0x44
#define	CPU_IMPL_INFINEON	0x49
#define	CPU_IMPL_FREESCALE	0x4D
#define	CPU_IMPL_NVIDIA		0x4E
#define	CPU_IMPL_APM		0x50
#define	CPU_IMPL_QUALCOMM	0x51
#define	CPU_IMPL_MARVELL	0x56
#define	CPU_IMPL_INTEL		0x69

// MIDR_EL1 "PartNum" values for known cores (Cortex-A values per Arm's
// published MIDR assignments; THUNDER is Cavium's part number).
#define	CPU_PART_THUNDER	0x0A1
#define	CPU_PART_FOUNDATION	0xD00
#define	CPU_PART_CORTEX_A35	0xD04
#define	CPU_PART_CORTEX_A53	0xD03
#define	CPU_PART_CORTEX_A55	0xD05
#define	CPU_PART_CORTEX_A57	0xD07
#define	CPU_PART_CORTEX_A72	0xD08
#define	CPU_PART_CORTEX_A73	0xD09
#define	CPU_PART_CORTEX_A75	0xD0A

// Cavium ThunderX revisions (MIDR "Revision" field values).
#define	CPU_REV_THUNDER_1_0	0x00
#define	CPU_REV_THUNDER_1_1	0x01
67 
68 #define	CPU_IMPL(midr)	(((midr) >> 24) & 0xff)
69 #define	CPU_PART(midr)	(((midr) >> 4) & 0xfff)
70 #define	CPU_VAR(midr)	(((midr) >> 20) & 0xf)
71 #define	CPU_REV(midr)	(((midr) >> 0) & 0xf)
72 
73 #define	CPU_IMPL_TO_MIDR(val)	(((val) & 0xff) << 24)
74 #define	CPU_PART_TO_MIDR(val)	(((val) & 0xfff) << 4)
75 #define	CPU_VAR_TO_MIDR(val)	(((val) & 0xf) << 20)
76 #define	CPU_REV_TO_MIDR(val)	(((val) & 0xf) << 0)
77 
78 #define	CPU_IMPL_MASK	(0xff << 24)
79 #define	CPU_PART_MASK	(0xfff << 4)
80 #define	CPU_VAR_MASK	(0xf << 20)
81 #define	CPU_REV_MASK	(0xf << 0)
82 
83 #define	CPU_ID_RAW(impl, part, var, rev)		\
84     (CPU_IMPL_TO_MIDR((impl)) |				\
85     CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) |	\
86     CPU_REV_TO_MIDR((rev)))
87 
88 #define	CPU_MATCH(mask, impl, part, var, rev)		\
89     (((mask) & PCPU_GET(midr)) ==			\
90     ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
91 
92 #define	CPU_MATCH_RAW(mask, devid)			\
93     (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
94 
95 static inline uint64 arm64_get_cyclecount(void)
96 {
97 	return READ_SPECIALREG(cntvct_el0);
98 }
99 
/*
 * Generate a helper that runs the AT (address translate) instruction for the
 * given stage/exception-level/access-type operand and returns the resulting
 * PAR_EL1 value (physical address on success, fault information otherwise).
 */
#define	ADDRESS_TRANSLATE_FUNC(stage)					\
static inline uint64									\
arm64_address_translate_ ##stage (uint64 addr)		\
{														\
	uint64 ret;											\
														\
	__asm __volatile(									\
	    "at " __ARMREG_STRING(stage) ", %1 \n"					\
	    "mrs %0, par_el1" : "=r"(ret) : "r"(addr));		\
														\
	return (ret);										\
}


// s1eXY: stage 1 translation as seen from ELX, for a read (r) or write (w)
// access — e.g. arm64_address_translate_s1e0r() checks an EL0 read.
ADDRESS_TRANSLATE_FUNC(s1e0r)
ADDRESS_TRANSLATE_FUNC(s1e0w)
ADDRESS_TRANSLATE_FUNC(s1e1r)
ADDRESS_TRANSLATE_FUNC(s1e1w)
118 
119 
// Saved FP/SIMD register context.
struct aarch64_fpu_state
{
	// The 32 SIMD/FP registers; 32 * 2 uint64 slots, presumably holding the
	// 128-bit V0-V31 registers as low/high pairs — confirm against the
	// save/restore code.
	uint64 regs[32 * 2];
	uint64 fpsr;	// Floating-point Status Register
	uint64 fpcr;	// Floating-point Control Register
};
126 
127 
/* raw exception frames */
// Layout must match the exception entry/exit assembly that builds and
// consumes this frame — do not reorder fields without updating that code.
struct iframe {
	// return info
	uint64 elr;		// Exception Link Register: PC to return to
	uint64 spsr;	// Saved Program Status Register
	// NOTE(review): only 20 general-purpose registers are stored here (plus
	// lr/sp/fp below); x20-x28 are presumably preserved as callee-saved by
	// the entry path — confirm against the exception entry assembly.
	uint64 x[20];
	uint64 lr;
	uint64 sp;
	uint64 fp;

	// exception info
	uint64 esr;		// Exception Syndrome Register: cause of the exception
	uint64 far;		// Fault Address Register: faulting virtual address
};
142 
143 
#ifdef __cplusplus
namespace BKernel {
	struct Thread;
}  // namespace BKernel


// Per-CPU state kept by the arm64 port.
typedef struct arch_cpu_info {
	// Cached MPIDR_EL1 affinity value for this CPU.
	// NOTE(review): MPIDR_EL1 is 64-bit and Aff3 occupies bits [39:32]; a
	// uint32 truncates Aff3 — confirm all supported topologies fit, or widen.
	uint32						mpidr;
	// Thread whose FP/SIMD state currently resides in the registers,
	// presumably for lazy FPU context switching — TODO confirm.
	BKernel::Thread*			last_vfp_user;
} arch_cpu_info;
#endif
155 
156 
157 #ifdef __cplusplus
158 extern "C" {
159 #endif
160 
161 
// Spin-wait hint, called from busy-wait loops: YIELD tells the hardware this
// thread is not doing useful work, letting an SMT sibling or hypervisor
// schedule something else (a NOP on cores without such sharing).
static inline void arch_cpu_pause(void)
{
	arm64_yield();
}
166 
167 
// Idle the CPU until there is work to do.
// YIELD is only a spin-loop hint and leaves the core busy-spinning in the
// idle loop; WFI actually suspends the core in a low-power state until an
// interrupt (or other wakeup event) arrives, which is what an idle hook is
// for — and matches what other kernel ports do in their idle loops.
static inline void arch_cpu_idle(void)
{
	__asm__ __volatile__("wfi" : : : "memory");
}
172 
173 
174 extern addr_t arm64_get_fp(void);
175 
176 
177 #ifdef __cplusplus
178 }
179 #endif
180 
181 #endif
182 
183 
184 #endif /* _KERNEL_ARCH_ARM64_ARCH_CPU_H_ */
185