/*
 * Copyright 2021-2022, Oliver Ruiz Dorantes. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#include <efi/types.h>

#include <kernel/arch/arm64/arm_registers.h>
#include <kernel/arch/arm64/arch_pte.h>

#include <arch_kernel.h>


extern "C" void _arch_exception_loop(void);
extern "C" void _arch_exception_panic(const char* someString, uint64 someValue);

extern "C" uint64 _arch_transition_EL2_EL1(void);

extern "C" void arch_cache_disable(void);
extern "C" void arch_cache_enable(void);
extern "C" void _arch_cache_flush_invalidate_all(void);
extern "C" void _arch_mmu_invalidate_tlb_all(uint8 el);

extern "C" void _arch_cache_clean_poc(void);


static const uint8 kInvalidExceptionLevel = 0xFFu;


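// Translate 'address' with an AT (address translate) instruction for the given
// translation regime; the result is reported in PAR_EL1.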
#define AARCH64_CHECK_ACCESS(operand, address) \
	__asm __volatile("at " #operand ", %0" : : "r"((uint64_t)address))

#define AARCH64_BREAK(id) \
	__asm __volatile("brk " #id)


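// Current exception level (0-3), taken from CurrentEL bits [3:2].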
static inline uint64 arch_exception_level()
{
	return (READ_SPECIALREG(CurrentEL) >> 2);
}


// Check arch_cpu.h macro ADDRESS_TRANSLATE_FUNC(stage) for an alternative implementation
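// Stage-1 read-access translation check of 'address' at the current exception
// level; returns true when PAR_EL1 reports a successful translation (F bit clear).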
static inline bool arch_mmu_read_access(addr_t address)
{
	switch (arch_exception_level())
	{
		case 0:
			AARCH64_CHECK_ACCESS(S1E0R, address);
			break;
		case 1:
			AARCH64_CHECK_ACCESS(S1E1R, address);
			break;
		case 2:
			AARCH64_CHECK_ACCESS(S1E2R, address);
			break;
		case 3:
			AARCH64_CHECK_ACCESS(S1E3R, address);
			break;
		default:
			return false;
	}
	return !(READ_SPECIALREG(PAR_EL1) & PAR_F);
}


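// Same as arch_mmu_read_access(), but checks stage-1 write access instead.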
static inline bool arch_mmu_write_access(addr_t address)
{
	switch (arch_exception_level())
	{
		case 0:
			AARCH64_CHECK_ACCESS(S1E0W, address);
			break;
		case 1:
			AARCH64_CHECK_ACCESS(S1E1W, address);
			break;
		case 2:
			AARCH64_CHECK_ACCESS(S1E2W, address);
			break;
		case 3:
			AARCH64_CHECK_ACCESS(S1E3W, address);
			break;
		default:
			return false;
	}
	return !(READ_SPECIALREG(PAR_EL1) & PAR_F);
}


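// Translation table base register for the current exception level: TTBR1 when
// 'kernel' is true (upper VA range, EL1 only), TTBR0 otherwise.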
static inline uint64 arch_mmu_base_register(bool kernel = false)
{
	switch (arch_exception_level())
	{
		case 1:
			if (kernel) {
				return READ_SPECIALREG(TTBR1_EL1);
			} else {
				return READ_SPECIALREG(TTBR0_EL1);
			}
		case 2:
			if (kernel) {
				/* This register is present only when
				 * FEAT_VHE is implemented. Otherwise,
				 * direct accesses to TTBR1_EL2 are UNDEFINED.
				 */
				return READ_SPECIALREG(TTBR0_EL2); // TTBR1_EL2
			} else {
				return READ_SPECIALREG(TTBR0_EL2);
			}
		case 3:
			return READ_SPECIALREG(TTBR0_EL3);
		default:
			return 0;
	}
}


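// Read the System Control Register (SCTLR) of the current exception level.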
static inline uint64 _arch_mmu_get_sctlr()
{
	switch (arch_exception_level())
	{
		case 1:
			return READ_SPECIALREG(SCTLR_EL1);
		case 2:
			return READ_SPECIALREG(SCTLR_EL2);
		case 3:
			return READ_SPECIALREG(SCTLR_EL3);
		default:
			return 0;
	}
}


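// Write 'sctlr' to the System Control Register of the current exception level.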
static inline void _arch_mmu_set_sctlr(uint64 sctlr)
{
	switch (arch_exception_level())
	{
		case 1:
			WRITE_SPECIALREG(SCTLR_EL1, sctlr);
			break;
		case 2:
			WRITE_SPECIALREG(SCTLR_EL2, sctlr);
			break;
		case 3:
			WRITE_SPECIALREG(SCTLR_EL3, sctlr);
			break;
	}
}


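// True when the MMU (SCTLR.M) is enabled at the current exception level.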
static inline bool arch_mmu_enabled()
{
	return _arch_mmu_get_sctlr() & SCTLR_M;
}


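// True when data caching (SCTLR.C) is enabled at the current exception level.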
static inline bool arch_mmu_cache_enabled()
{
	return _arch_mmu_get_sctlr() & SCTLR_C;
}


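// Read the Translation Control Register (TCR) of the given exception level,
// defaulting to the current one.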
static inline uint64 _arch_mmu_get_tcr(int el = kInvalidExceptionLevel)
{
	if (el == kInvalidExceptionLevel)
		el = arch_exception_level();

	switch (el)
	{
		case 1:
			return READ_SPECIALREG(TCR_EL1);
		case 2:
			return READ_SPECIALREG(TCR_EL2);
		case 3:
			return READ_SPECIALREG(TCR_EL3);
		default:
			return 0;
	}
}

// TODO: move to arm_registers.h
static constexpr uint64 TG_MASK = 0x3u;
static constexpr uint64 TG_4KB = 0x0u;
static constexpr uint64 TG_16KB = 0x2u;
static constexpr uint64 TG_64KB = 0x1u;

static constexpr uint64 TxSZ_MASK = (1 << 6) - 1;

static constexpr uint64 T0SZ_MASK = TxSZ_MASK;
static constexpr uint64 T1SZ_MASK = TxSZ_MASK << TCR_T1SZ_SHIFT;

static constexpr uint64 IPS_MASK = 0x7UL << TCR_IPS_SHIFT;

static constexpr uint64 TCR_EPD1_DISABLE = (1 << 23);

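// Size in bits of the user (TTBR0) virtual address space: 64 - TCR.T0SZ.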
static inline uint32 arch_mmu_user_address_bits()
{
	uint64 reg = _arch_mmu_get_tcr();

	return 64 - (reg & T0SZ_MASK);
}


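// Translation granule (TG0 field) configured for the user address space.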
static inline uint32 arch_mmu_user_granule()
{
	uint64 reg = _arch_mmu_get_tcr();
	return ((reg >> TCR_TG0_SHIFT) & TG_MASK);
}


/*
 * Given that "EL2 and EL3 have a TTBR0, but no TTBR1. This means
 * that if either EL2 or EL3 is using AArch64, they can only use
 * virtual addresses in the range 0x0 to 0x0000FFFF_FFFFFFFF."
 *
 * The following calls only make sense under EL1.
 */
static inline uint32 arch_mmu_kernel_address_bits()
{
	uint64 reg = _arch_mmu_get_tcr();
	return 64 - ((reg & T1SZ_MASK) >> TCR_T1SZ_SHIFT);
}


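// Translation granule (TG1 field) configured for the kernel address space.
// Note that the ARM ARM specifies a different granule-size encoding for TG1
// than for TG0, so the TG_* constants above apply to TG0 only.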
static inline uint32 arch_mmu_kernel_granule()
{
	uint64 reg = _arch_mmu_get_tcr();
	return ((reg >> TCR_TG1_SHIFT) & TG_MASK);
}


/*
 * Distinguish between kernel (TTBR1) and user (TTBR0) addressing
 */
static inline bool arch_mmu_is_kernel_address(uint64 address)
{
	return address > KERNEL_BASE;
}


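// Number of 8-byte translation table entries that fit in a granule of
// 'granularity' bytes.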
static inline constexpr uint32 arch_mmu_entries_per_granularity(uint32 granularity)
{
	return (granularity / 8);
}