xref: /haiku/src/system/kernel/arch/arm64/arch_vm_translation_map.cpp (revision 6a2d53e7237764eab0c7b6d121772f26d636fb60)
/*
 * Copyright 2019 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include <arch/vm_translation_map.h>

#include <new>
#include <string.h>

#include <boot/kernel_args.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

#include "PMAPPhysicalPageMapper.h"
#include "VMSAv8TranslationMap.h"

// Statically allocated storage for the physical page mapper, constructed in
// place during arch_vm_translation_map_init() before the heap is available.
static char sPhysicalPageMapperData[sizeof(PMAPPhysicalPageMapper)];

// Physical address of an empty page table, which is used for break-before-make
// when updating TTBR0_EL1.
static phys_addr_t sEmptyTable;


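// Allocate and zero a single wired page to serve as the all-invalid
// top-level table for TTBR0_EL1.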
static void
arch_vm_alloc_empty_table(void)
{
	vm_page_reservation reservation;
	vm_page_reserve_pages(&reservation, 1, VM_PRIORITY_SYSTEM);
	vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
	DEBUG_PAGE_ACCESS_END(page);
	vm_page_unreserve_pages(&reservation);
	sEmptyTable = page->physical_page_number << PAGE_SHIFT;
}


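// Install the empty table into TTBR0_EL1, cutting off all userspace
// translations; the ISB makes the new base visible to subsequent table
// walks.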
void
arch_vm_install_empty_table_ttbr0(void)
{
	WRITE_SPECIALREG(TTBR0_EL1, sEmptyTable);
	asm("isb");
}


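// Create the VMTranslationMap for a new address space. The kernel map wraps
// the page table already live in TTBR1_EL1; userspace maps get their tables
// allocated later by VMSAv8TranslationMap itself.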
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	phys_addr_t pt = 0;
	if (kernel) {
		// The kernel reuses the page table already installed in TTBR1_EL1,
		// and parks TTBR0_EL1 on the empty table until a userspace map is
		// activated.
		pt = READ_SPECIALREG(TTBR1_EL1) & kTtbrBasePhysAddrMask;
		arch_vm_install_empty_table_ttbr0();
	}

	// 4 KiB granule (12-bit pages), 48-bit virtual addresses.
	*_map = new (std::nothrow) VMSAv8TranslationMap(kernel, pt, 12, 48, 1);

	if (*_map == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


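// One-time MMU setup: take stock of the configuration inherited from the
// boot loader, enable optional VMSAv8 hardware features, and program the
// memory attributes used by the kernel.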
status_t
arch_vm_translation_map_init(kernel_args* args, VMPhysicalPageMapper** _physicalPageMapper)
{
	dprintf("arch_vm_translation_map_init\n");

	// Nuke the boot loader's TTBR0 mapping: the loader's identity mapping of
	// physical memory is still active here, so the table's physical address
	// is directly dereferenceable. The kernel accesses physical memory
	// through its own mapping at KERNEL_PMAP_BASE instead.
	memset((void*) (READ_SPECIALREG(TTBR0_EL1) & kTtbrBasePhysAddrMask), 0, B_PAGE_SIZE);

	uint64_t tcr = READ_SPECIALREG(TCR_EL1);
	uint32_t t0sz = tcr & 0x3f;          // T0SZ is a 6-bit field, bits [5:0]
	uint32_t t1sz = (tcr >> 16) & 0x3f;  // T1SZ, bits [21:16]
	uint32_t tg0 = (tcr >> 14) & 0x3;    // TG0, bits [15:14]
	uint32_t tg1 = (tcr >> 30) & 0x3;    // TG1, bits [31:30]
	uint64_t ttbr0 = READ_SPECIALREG(TTBR0_EL1);
	uint64_t ttbr1 = READ_SPECIALREG(TTBR1_EL1);
	uint64_t mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	uint64_t mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	uint64_t sctlr = READ_SPECIALREG(SCTLR_EL1);

	// Enable hardware management of the Access flag and, where supported,
	// the Dirty state, as advertised by ID_AA64MMFR1_EL1.HAFDBS.
	ASSERT(VMSAv8TranslationMap::fHwFeature == 0);
	uint64_t hafdbs = ID_AA64MMFR1_HAFDBS(mmfr1);
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF) {
		VMSAv8TranslationMap::fHwFeature = VMSAv8TranslationMap::HW_ACCESS;
		tcr |= (1UL << 39); // TCR_EL1.HA
	}
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF_DBS) {
		VMSAv8TranslationMap::fHwFeature
			= VMSAv8TranslationMap::HW_ACCESS | VMSAv8TranslationMap::HW_DIRTY;
		tcr |= (1UL << 40) | (1UL << 39); // TCR_EL1.HD | TCR_EL1.HA
	}

	if (ID_AA64MMFR2_CNP(mmfr2) == ID_AA64MMFR2_CNP_IMPL) {
		VMSAv8TranslationMap::fHwFeature |= VMSAv8TranslationMap::HW_COMMON_NOT_PRIVATE;
	}

	// Memory attribute indices 0-3, matching what GetMemoryAttr() hands out.
	uint64_t mair =
		(MAIR_DEVICE_nGnRnE) |   // Uncached
		(MAIR_DEVICE_GRE << 8) | // Write-combining
		(MAIR_NORMAL_WT << 16) | // Write-through
		(MAIR_NORMAL_WB << 24);  // Write-back
	VMSAv8TranslationMap::fMair = mair;
	WRITE_SPECIALREG(MAIR_EL1, mair);

	WRITE_SPECIALREG(TCR_EL1, tcr);

	dprintf("vm config: MMFR1: %lx, MMFR2: %lx, TCR: %lx\nTTBR0: %lx, TTBR1: %lx\nT0SZ: %u, "
			"T1SZ: %u, TG0: %u, TG1: %u, MAIR: %lx, SCTLR: %lx\n",
		mmfr1, mmfr2, tcr, ttbr0, ttbr1, t0sz, t1sz, tg0, tg1, mair, sctlr);

	// The kernel heap is not available yet, so construct the physical page
	// mapper in statically allocated storage.
	*_physicalPageMapper = new (&sPhysicalPageMapperData) PMAPPhysicalPageMapper();

	return B_OK;
}


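// Called once the page allocator is usable: allocate the shared empty
// TTBR0 table.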
status_t
arch_vm_translation_map_init_post_sem(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_sem\n");

	// Create an empty page table for use when we don't want a userspace page table.
	arch_vm_alloc_empty_table();

	return B_OK;
}


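// Called once areas can be created: reserve the address ranges that are
// already mapped behind the VM's back so they are not handed out again.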
status_t
arch_vm_translation_map_init_post_area(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_area\n");

	// Create an area covering the physical map area.
	void* address = (void*) KERNEL_PMAP_BASE;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(), "physical map area", &address,
		B_EXACT_ADDRESS, KERNEL_PMAP_SIZE, 0);
	if (area < 0)
		return area;

	if (args->arch_args.uart.kind[0] != 0) {
		// The debug UART is already mapped by the EFI loader; cover its
		// register range with a null area as well.
		address = (void*) args->arch_args.uart.regs.start;
		area = vm_create_null_area(VMAddressSpace::KernelID(),
			"debug uart map area", &address, B_EXACT_ADDRESS,
			ROUNDUP(args->arch_args.uart.regs.size, B_PAGE_SIZE), 0);
		if (area < 0)
			return area;
	}

	return B_OK;
}

// TODO: reuse some bits from VMSAv8TranslationMap

// Geometry of the boot page tables: 4 KiB granule (page_bits = 12) and a
// 48-bit kernel address range (tsz = 16, i.e. 64 - 16 VA bits).
static uint64_t page_bits = 12;
static uint64_t tsz = 16;

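// Return a virtual pointer to a page table given its physical address,
// using the physical map window at KERNEL_PMAP_BASE.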
static uint64_t*
TableFromPa(phys_addr_t pa)
{
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


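// Recursively walk the page table rooted at ptPa and install a level 3
// entry mapping va to pa, allocating missing intermediate tables from the
// boot loader and splitting any block mapping that stands in the way.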
static void
map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
	phys_addr_t (*get_free_page)(kernel_args*), kernel_args* args)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;

	int index = (va >> shift) & tableMask;
	uint64_t* pte = &TableFromPa(ptPa)[index];

	if (level == 3) {
		// Install the final page descriptor (type 0x3).
		atomic_set64((int64*) pte, pa | 0x3);
		asm("dsb ish");
	} else {
		uint64_t pteVal = atomic_get64((int64*) pte);
		int type = pteVal & 0x3;

		phys_addr_t table;
		if (type == 0x3) {
			// Descend into the existing next-level table.
			table = pteVal & kPteAddrMask;
		} else {
			table = get_free_page(args) << page_bits;
			dprintf("early: pulling page %lx\n", table);
			uint64_t* newTableVa = TableFromPa(table);

			if (type == 0x1) {
				// Break up an existing block mapping: replicate it as
				// entries one level down. At level 3 the descriptor type
				// changes from block (0x1) to page (0x3).
				int nextShift = tableBits * (2 - level) + page_bits;
				uint64_t entrySize = 1UL << nextShift;
				uint64_t entryType = (level + 1 == 3) ? 0x3 : 0x1;

				for (int i = 0; i < (1 << tableBits); i++)
					newTableVa[i] = ((pteVal & ~(uint64_t) 0x3) | entryType) + i * entrySize;
			} else {
				memset(newTableVa, 0, 1 << page_bits);
			}

			asm("dsb ish");

			atomic_set64((int64*) pte, table | 0x3);
		}

		map_page_early(table, level + 1, va, pa, get_free_page, args);
	}
}


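// Map a single kernel page while the VM is still coming up; intermediate
// tables are fed from the boot loader's get_free_page().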
status_t
arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	// Early mappings must target the kernel (TTBR1) half of the address
	// space, i.e. all bits above the configured VA range must be set.
	int va_bits = 64 - tsz;
	uint64_t va_mask = (1UL << va_bits) - 1;
	ASSERT((va & ~va_mask) == ~va_mask);

	phys_addr_t ptPa = READ_SPECIALREG(TTBR1_EL1) & kTtbrBasePhysAddrMask;
	int level = VMSAv8TranslationMap::CalcStartLevel(va_bits, page_bits);
	va &= va_mask;
	pa |= VMSAv8TranslationMap::GetMemoryAttr(attributes, 0, true);

	map_page_early(ptPa, level, va, pa, get_free_page, args);

	return B_OK;
}


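// Probe whether the kernel may read or write the given address by running
// an AT (address translate) instruction and checking the fault flag in
// PAR_EL1.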
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t va, uint32 protection)
{
	if ((protection & B_KERNEL_WRITE_AREA) != 0)
		asm("at s1e1w, %0" : : "r"((uint64_t) va));
	else
		asm("at s1e1r, %0" : : "r"((uint64_t) va));

	// PAR_EL1 is only guaranteed to reflect the result of the address
	// translation instruction after a context synchronization event.
	asm("isb");
	return (READ_SPECIALREG(PAR_EL1) & PAR_F) == 0;
}
238