xref: /haiku/src/system/kernel/arch/arm64/arch_vm_translation_map.cpp (revision 6f80a9801fedbe7355c4360bd204ba746ec3ec2d)
/*
 * Copyright 2019 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include <new>
#include <string.h>

#include <arch/vm_translation_map.h>
#include <boot/kernel_args.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm.h>

#include "PMAPPhysicalPageMapper.h"
#include "VMSAv8TranslationMap.h"

// Backing storage for the statically constructed physical page mapper;
// aligned so the placement new in arch_vm_translation_map_init() is valid.
alignas(PMAPPhysicalPageMapper) static char
	sPhysicalPageMapperData[sizeof(PMAPPhysicalPageMapper)];


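// Creates the translation map for an address space. Only the kernel case is
// implemented at this revision: it wraps the top-level table that TTBR1_EL1
// already points at; user address spaces still panic here.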
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	phys_addr_t pt = 0;
	if (kernel) {
		pt = READ_SPECIALREG(TTBR1_EL1);
	} else {
		panic("arch_vm_translation_map_create_map user not implemented");
	}

	// 4 KiB granule (12 page bits) and 48 bit virtual addresses, matching the
	// TCR setup below.
	*_map = new (std::nothrow) VMSAv8TranslationMap(kernel, pt, 12, 48, 1);

	if (*_map == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


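// Reads back the already-installed MMU configuration, enables hardware
// management of the Access and Dirty flags when ID_AA64MMFR1_EL1 advertises
// it, and constructs the global physical page mapper.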
status_t
arch_vm_translation_map_init(kernel_args* args, VMPhysicalPageMapper** _physicalPageMapper)
{
	dprintf("arch_vm_translation_map_init\n");

	// Nuke the TTBR0 mapping; the kernel accesses physical memory through the
	// physical map area at KERNEL_PMAP_BASE instead.
	memset((void*) READ_SPECIALREG(TTBR0_EL1), 0, B_PAGE_SIZE);

	uint64_t tcr = READ_SPECIALREG(TCR_EL1);
	// T0SZ and T1SZ are six-bit fields.
	uint32_t t0sz = tcr & 0x3f;
	uint32_t t1sz = (tcr >> 16) & 0x3f;
	uint32_t tg0 = (tcr >> 14) & 0x3;
	uint32_t tg1 = (tcr >> 30) & 0x3;
	uint64_t ttbr0 = READ_SPECIALREG(TTBR0_EL1);
	uint64_t ttbr1 = READ_SPECIALREG(TTBR1_EL1);
	uint64_t mair = READ_SPECIALREG(MAIR_EL1);
	uint64_t mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	uint64_t sctlr = READ_SPECIALREG(SCTLR_EL1);

	uint64_t hafdbs = ID_AA64MMFR1_HAFDBS(mmfr1);
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF) {
		// Hardware updates the Access flag only: set TCR_EL1.HA (bit 39).
		VMSAv8TranslationMap::fHwFeature = VMSAv8TranslationMap::HW_ACCESS;
		tcr |= (1UL << 39);
	}
	if (hafdbs == ID_AA64MMFR1_HAFDBS_AF_DBS) {
		// Hardware also updates the dirty state: set TCR_EL1.HD (bit 40) in
		// addition to TCR_EL1.HA (bit 39).
		VMSAv8TranslationMap::fHwFeature
			= VMSAv8TranslationMap::HW_ACCESS | VMSAv8TranslationMap::HW_DIRTY;
		tcr |= (1UL << 40) | (1UL << 39);
	}

	VMSAv8TranslationMap::fMair = mair;

	WRITE_SPECIALREG(TCR_EL1, tcr);

	dprintf("vm config: MMFR1: %lx, TCR: %lx\nTTBR0: %lx, TTBR1: %lx\nT0SZ: %u, T1SZ: %u, TG0: %u, "
			"TG1: %u, MAIR: %lx, SCTLR: %lx\n",
		mmfr1, tcr, ttbr0, ttbr1, t0sz, t1sz, tg0, tg1, mair, sctlr);

	*_physicalPageMapper = new (&sPhysicalPageMapperData) PMAPPhysicalPageMapper();

	return B_OK;
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_sem\n");
	return B_OK;
}


status_t
arch_vm_translation_map_init_post_area(kernel_args* args)
{
	dprintf("arch_vm_translation_map_init_post_area\n");

	// Reserve the physical map region with a null area so that nothing else
	// gets mapped there.
	void* address = (void*) KERNEL_PMAP_BASE;
	area_id area = vm_create_null_area(VMAddressSpace::KernelID(), "physical map area", &address,
		B_EXACT_ADDRESS, KERNEL_PMAP_SIZE, 0);
	if (area < B_OK)
		return area;

	return B_OK;
}

// TODO: reuse some bits from VMSAv8TranslationMap

// Output address bits [47:12] of a table/page descriptor, and the remaining
// attribute bits (everything except the address and the type bits [1:0]).
static constexpr uint64_t kPteAddrMask = (((1UL << 36) - 1) << 12);
static constexpr uint64_t kPteAttrMask = ~(kPteAddrMask | 0x3);

// Early-boot mapping parameters: 4 KiB granule, T1SZ = 16 (48 bit kernel
// virtual addresses).
static uint64_t page_bits = 12;
static uint64_t tsz = 16;
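// Worked example of that geometry: with page_bits = 12 each table holds 512
// entries (tableBits = 9 below), the per-level index shifts are 39, 30, 21
// and 12 for levels 0 through 3, and the walk starts at level 0 for the
// 48 bit kernel address space.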


static uint64_t*
TableFromPa(phys_addr_t pa)
{
	// Translation tables are accessed through the physical map area, which
	// maps physical memory at a fixed offset.
	return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
}


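// Walks the translation table at ptPa recursively, allocating intermediate
// tables from get_free_page() as needed, and installs the final level 3
// entry mapping va to pa. An existing block entry (descriptor type 0x1) is
// first expanded into a table of next-level entries covering the same range,
// e.g. a 1 GiB level 1 block becomes 512 2 MiB level 2 block entries.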
static void
map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
	phys_addr_t (*get_free_page)(kernel_args*), kernel_args* args)
{
	int tableBits = page_bits - 3;
	uint64_t tableMask = (1UL << tableBits) - 1;

	int shift = tableBits * (3 - level) + page_bits;

	int index = (va >> shift) & tableMask;
	uint64_t* pte = &TableFromPa(ptPa)[index];

	if (level == 3) {
		// Install the page descriptor (type 0x3).
		atomic_set64((int64*) pte, pa | 0x3);
		asm("dsb ish");
	} else {
		uint64_t pteVal = atomic_get64((int64*) pte);
		int type = pteVal & 0x3;

		phys_addr_t table;
		if (type == 0x3) {
			// Already a table descriptor, descend into it.
			table = pteVal & kPteAddrMask;
		} else {
			table = get_free_page(args) << page_bits;
			dprintf("early: pulling page %lx\n", table);
			uint64_t* newTableVa = TableFromPa(table);

			if (type == 0x1) {
				// Split an existing block mapping into next-level entries
				// covering the same physical range.
				int childShift = tableBits * (3 - (level + 1)) + page_bits;
				uint64_t entrySize = 1UL << childShift;

				for (int i = 0; i < (1 << tableBits); i++)
					newTableVa[i] = pteVal + i * entrySize;
			} else {
				memset(newTableVa, 0, 1 << page_bits);
			}

			// Make sure the new table's contents are visible before the
			// descriptor pointing to it is installed.
			asm("dsb ish");

			atomic_set64((int64*) pte, table | 0x3);
		}

		map_page_early(table, level + 1, va, pa, get_free_page, args);
	}
}


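// Maps a single page into the kernel address space before the VM is fully
// initialized, allocating any missing page tables through the provided
// get_free_page() callback.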
status_t
arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args*))
{
	int va_bits = 64 - tsz;
	uint64_t va_mask = (1UL << va_bits) - 1;
	// Only the upper (TTBR1) half of the address space can be mapped here.
	ASSERT((va & ~va_mask) == ~va_mask);

	phys_addr_t ptPa = READ_SPECIALREG(TTBR1_EL1);
	int level = VMSAv8TranslationMap::CalcStartLevel(va_bits, page_bits);
	va &= va_mask;
	pa |= VMSAv8TranslationMap::GetMemoryAttr(attributes, 0, true);

	map_page_early(ptPa, level, va, pa, get_free_page, args);

	return B_OK;
}


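// Probes a kernel virtual address with the AT (address translation)
// instruction and reports whether the translation would succeed for the
// requested access; PAR_EL1.F is set when the translation faults.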
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t va, uint32 protection)
{
	// Translate for a write or a read depending on the requested protection,
	// then synchronize so PAR_EL1 holds the result of this translation.
	if ((protection & B_KERNEL_WRITE_AREA) != 0)
		asm("at s1e1w, %0" : : "r"((uint64_t) va));
	else
		asm("at s1e1r, %0" : : "r"((uint64_t) va));
	asm("isb");

	return (READ_SPECIALREG(PAR_EL1) & PAR_F) == 0;
}
195