xref: /haiku/src/system/kernel/arch/riscv64/arch_vm.cpp (revision cbe0a0c436162d78cc3f92a305b64918c839d079)
1 /*
2  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
3  * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk
5  * Distributed under the terms of the MIT License.
6  */
7 
8 #include <vm/vm.h>
9 #include <vm/VMAddressSpace.h>
10 #include <arch/vm.h>
11 #include <boot/kernel_args.h>
12 
13 #include "RISCV64VMTranslationMap.h"
14 
15 
16 #define TRACE_ARCH_VM
17 #ifdef TRACE_ARCH_VM
18 #	define TRACE(x) dprintf x
19 #else
20 #	define TRACE(x) ;
21 #endif
22 
23 
// Canonicalize an Sv39 virtual address: the hardware requires bits 63..39
// to be copies of bit 38, so propagate the sign bit upward when it is set.
static uint64_t
SignExtendVirtAdr(uint64_t virtAdr)
{
	const uint64_t signBit = (uint64_t)1 << 38;
	if ((virtAdr & signBit) == 0)
		return virtAdr;
	return virtAdr | 0xFFFFFF8000000000;
}
31 
32 
33 static Pte*
34 LookupPte(phys_addr_t pageTable, addr_t virtAdr)
35 {
36 	Pte *pte = (Pte*)VirtFromPhys(pageTable);
37 	for (int level = 2; level > 0; level --) {
38 		pte += VirtAdrPte(virtAdr, level);
39 		if (!((1 << pteValid) & pte->flags)) {
40 			return NULL;
41 		}
42 		// TODO: Handle superpages (RWX=0 when not at lowest level)
43 		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
44 	}
45 	pte += VirtAdrPte(virtAdr, 0);
46 	return pte;
47 }
48 
49 
50 
51 static void
52 WritePteFlags(uint32 flags)
53 {
54 	bool first = true;
55 	dprintf("{");
56 	for (uint32 i = 0; i < 32; i++) {
57 		if ((1 << i) & flags) {
58 			if (first)
59 				first = false;
60 			else
61 				dprintf(", ");
62 
63 			switch (i) {
64 				case pteValid:
65 					dprintf("valid");
66 					break;
67 				case pteRead:
68 					dprintf("read");
69 					break;
70 				case pteWrite:
71 					dprintf("write");
72 					break;
73 				case pteExec:
74 					dprintf("exec");
75 					break;
76 				case pteUser:
77 					dprintf("user");
78 					break;
79 				case pteGlobal:
80 					dprintf("global");
81 					break;
82 				case pteAccessed:
83 					dprintf("accessed");
84 					break;
85 				case pteDirty:
86 					dprintf("dirty");
87 					break;
88 				default:
89 					dprintf("%" B_PRIu32, i);
90 			}
91 		}
92 	}
93 	dprintf("}");
94 }
95 
96 
97 static void
98 DumpPageWrite(uint64_t virtAdr, uint64_t physAdr, size_t size, uint64 flags,
99 	uint64& firstVirt, uint64& firstPhys, uint64& firstFlags, uint64& len)
100 {
101 	if (virtAdr == firstVirt + len && physAdr == firstPhys + len
102 			&& flags == firstFlags) {
103 		len += size;
104 	} else {
105 		if (len != 0) {
106 			dprintf("  0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR,
107 				firstVirt, firstVirt + (len - 1));
108 			dprintf(": 0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR ",%#"
109 				B_PRIxADDR ", ", firstPhys,
110 				firstPhys + (len - 1), len);
111 			WritePteFlags(firstFlags); dprintf("\n");
112 		}
113 		firstVirt = virtAdr;
114 		firstPhys = physAdr;
115 		firstFlags = flags;
116 		len = size;
117 	}
118 }
119 
120 
// Recursively walk one page-table level and feed every leaf mapping to
// DumpPageWrite(), which coalesces adjacent runs for printing.
// `level` counts down from 2 (Sv39 root) to 0 (last-level tables); the
// run-accumulator state (firstVirt/firstPhys/firstFlags/len) is threaded
// through by reference across the whole traversal.
static void
DumpPageTableInt(Pte* pte, uint64_t virtAdr, uint32_t level, uint64& firstVirt,
	uint64& firstPhys, uint64& firstFlags, uint64& len)
{
	for (uint32 i = 0; i < pteCount; i++) {
		if (((1 << pteValid) & pte[i].flags) != 0) {
			// Valid entry with R/W/X all clear -> pointer to next level.
			if ((((1 << pteRead) | (1 << pteWrite)
					| (1 << pteExec)) & pte[i].flags) == 0) {

				if (level == 0) {
					// Level 0 entries must be leaves; report the anomaly
					// but recurse anyway (level wraps to a huge uint32).
					kprintf("  internal page table on "
						"level 0\n");
				}

				DumpPageTableInt(
					(Pte*)VirtFromPhys(pageSize*pte[i].ppn),
					virtAdr + ((uint64_t)i
						<< (pageBits + pteIdxBits
							* level)),
					level - 1, firstVirt, firstPhys,
						firstFlags, len);
			} else {
				// Leaf mapping: report the canonical (sign-extended)
				// virtual address and the size covered at this level.
				DumpPageWrite(SignExtendVirtAdr(virtAdr
					+ ((uint64_t)i << (pageBits
						+ pteIdxBits*level))),
					pte[i].ppn * B_PAGE_SIZE,
					1 << (pageBits + pteIdxBits*level),
					pte[i].flags, firstVirt, firstPhys,
					firstFlags, len);
			}
		}
	}
}
154 
155 
156 static int
157 DumpPageTable(int argc, char** argv)
158 {
159 	SatpReg satp;
160 	if (argc >= 2) {
161 		team_id id = strtoul(argv[1], NULL, 0);
162 		VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
163 		if (addrSpace == NULL) {
164 			kprintf("could not find team %" B_PRId32 "\n", id);
165 			return 0;
166 		}
167 		satp.val = ((RISCV64VMTranslationMap*)
168 			addrSpace->TranslationMap())->Satp();
169 		dprintf("page table for team %" B_PRId32 "\n", id);
170 	} else {
171 		satp.val = Satp();
172 		dprintf("current page table:\n");
173 	}
174 	Pte* root = (Pte*)VirtFromPhys(satp.ppn * B_PAGE_SIZE);
175 
176 	uint64 firstVirt = 0;
177 	uint64 firstPhys = 0;
178 	uint64 firstFlags = 0;
179 	uint64 len = 0;
180 	DumpPageTableInt(root, 0, 2, firstVirt, firstPhys, firstFlags, len);
181 	DumpPageWrite(0, 0, 0, 0, firstVirt, firstPhys, firstFlags, len);
182 
183 	return 0;
184 }
185 
186 
187 static int
188 DumpVirtPage(int argc, char** argv)
189 {
190 	int curArg = 1;
191 	SatpReg satp;
192 
193 	satp.val = Satp();
194 	while (argv[curArg][0] == '-') {
195 		if (strcmp(argv[curArg], "-team") == 0) {
196 			curArg++;
197 			team_id id = strtoul(argv[curArg++], NULL, 0);
198 			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
199 			if (addrSpace == NULL) {
200 				kprintf("could not find team %" B_PRId32 "\n", id);
201 				return 0;
202 			}
203 			satp.val = ((RISCV64VMTranslationMap*)
204 				addrSpace->TranslationMap())->Satp();
205 		} else {
206 			kprintf("unknown flag \"%s\"\n", argv[curArg]);
207 			return 0;
208 		}
209 	}
210 
211 	kprintf("satp: %#" B_PRIx64 "\n", satp.val);
212 
213 	uint64 firstVirt = 0;
214 	uint64 firstPhys = 0;
215 	uint64 firstFlags = 0;
216 	uint64 len = B_PAGE_SIZE;
217 	if (!evaluate_debug_expression(argv[curArg++], &firstVirt, false))
218 		return 0;
219 
220 	firstVirt = ROUNDDOWN(firstVirt, B_PAGE_SIZE);
221 
222 	Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, firstVirt);
223 	if (pte == NULL) {
224 		dprintf("not mapped\n");
225 		return 0;
226 	}
227 	firstPhys = pte->ppn * B_PAGE_SIZE;
228 	firstFlags = pte->flags;
229 
230 	DumpPageWrite(0, 0, 0, 0, firstVirt, firstPhys, firstFlags, len);
231 
232 	return 0;
233 }
234 
235 
// Early architecture-specific VM initialization hook.
// Nothing is needed at this stage on RISC-V 64.
status_t
arch_vm_init(kernel_args *args)
{
	return B_OK;
}
241 
242 
243 status_t
244 arch_vm_init_post_area(kernel_args *args)
245 {
246 	void* address = (void*)args->arch_args.physMap.start;
247 	area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
248 		"physical map area", &address, B_EXACT_ADDRESS,
249 		args->arch_args.physMap.size, 0);
250 	if (area < B_OK)
251 		return area;
252 
253 	add_debugger_command("dump_page_table", &DumpPageTable, "Dump page table");
254 	add_debugger_command("dump_virt_page", &DumpVirtPage, "Dump virtual page mapping");
255 
256 	return B_OK;
257 }
258 
259 
// Hook run after boot modules are loaded; no module-dependent VM setup is
// required on RISC-V 64.
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}
265 
266 
// Final VM init hook: turn the virtual ranges the boot loader asked us to
// preserve into proper kernel areas so the VM will not reuse them.
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: %#" B_PRIxSIZE "\n", (void*)range.start, range.size));

#if 1
		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		// Resolve the physical page behind the range's first page and map
		// the whole range as one physical-memory area at the same virtual
		// address.
		// NOTE(review): only the first page's mapping is queried, so this
		// assumes each kept range is physically contiguous -- confirm the
		// boot loader guarantees that.
		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
#endif
	}

#if 0
	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

	return B_OK;
}
311 
312 
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// Invoked when a dying userland thread switches to the kernel team to
	// perform its cleanup (deleting the team if it was the main thread).
	// Unlike some architectures where this hook is a no-op because all
	// page tables share the kernel mappings, this implementation does
	// switch translation: it installs the target map's satp value and
	// flushes the TLB. The translation map objects are ref-counted, so
	// `to`'s table stays alive while installed here.
	// NOTE(review): FlushTlbAll() is a full flush; if Satp() encoded an
	// ASID, a targeted invalidation could be used instead -- confirm.

	SetSatp(((RISCV64VMTranslationMap*)to->TranslationMap())->Satp());
	FlushTlbAll();
}
328 
329 
// Report whether the given B_*_AREA protection combination can be
// represented by this architecture. RISC-V PTEs carry independent
// R/W/X/U bits, so every combination is accepted.
bool
arch_vm_supports_protection(uint32 protection)
{
	return true;
}
335 
336 
// Clear any architecture-specific memory type previously set on `area`.
// Memory types are not implemented on RISC-V 64, so this is a no-op
// (matching arch_vm_set_memory_type() below).
void
arch_vm_unset_memory_type(VMArea *area)
{
}
341 
342 
// Set an architecture-specific memory type (caching attribute) for the
// area's physical range. Not implemented on RISC-V 64; the request is
// accepted and ignored so callers relying on B_OK keep working.
status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
	return B_OK;
}
348