xref: /haiku/src/system/kernel/arch/riscv64/arch_vm.cpp (revision a127b88ecbfab58f64944c98aa47722a18e363b2)
1 /*
2  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
3  * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
4  * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk
5  * Distributed under the terms of the MIT License.
6  */
7 
8 #include <vm/vm.h>
9 #include <vm/VMAddressSpace.h>
10 #include <arch/vm.h>
11 #include <boot/kernel_args.h>
12 
13 #include "RISCV64VMTranslationMap.h"
14 
15 
// Define TRACE_ARCH_VM to get verbose tracing of this file via dprintf;
// when it is undefined, TRACE((...)) expands to an empty statement.
#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
22 
23 
static uint64_t
SignExtendVirtAdr(uint64_t virtAdr)
{
	// Sv39 virtual addresses are 39 bits wide; bit 38 is the sign bit and
	// must be replicated into the upper 25 bits to form a canonical
	// 64-bit address.
	const uint64_t signBit = (uint64_t)1 << 38;
	const uint64_t upperMask = 0xFFFFFF8000000000;
	return (virtAdr & signBit) != 0 ? (virtAdr | upperMask) : virtAdr;
}
31 
32 
33 static Pte*
34 LookupPte(phys_addr_t pageTable, addr_t virtAdr)
35 {
36 	Pte *pte = (Pte*)VirtFromPhys(pageTable);
37 	for (int level = 2; level > 0; level --) {
38 		pte += VirtAdrPte(virtAdr, level);
39 		if (!((1 << pteValid) & pte->flags)) {
40 			return NULL;
41 		}
42 		// TODO: Handle superpages (RWX=0 when not at lowest level)
43 		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
44 	}
45 	pte += VirtAdrPte(virtAdr, 0);
46 	return pte;
47 }
48 
49 
50 
51 static void
52 WritePteFlags(uint32 flags)
53 {
54 	bool first = true;
55 	dprintf("{");
56 	for (uint32 i = 0; i < 32; i++) {
57 		if ((1 << i) & flags) {
58 			if (first)
59 				first = false;
60 			else
61 				dprintf(", ");
62 
63 			switch (i) {
64 				case pteValid:
65 					dprintf("valid");
66 					break;
67 				case pteRead:
68 					dprintf("read");
69 					break;
70 				case pteWrite:
71 					dprintf("write");
72 					break;
73 				case pteExec:
74 					dprintf("exec");
75 					break;
76 				case pteUser:
77 					dprintf("user");
78 					break;
79 				case pteGlobal:
80 					dprintf("global");
81 					break;
82 				case pteAccessed:
83 					dprintf("accessed");
84 					break;
85 				case pteDirty:
86 					dprintf("dirty");
87 					break;
88 				default:
89 					dprintf("%" B_PRIu32, i);
90 			}
91 		}
92 	}
93 	dprintf("}");
94 }
95 
96 
97 class PageTableDumper
98 {
99 private:
100 	uint64 firstVirt;
101 	uint64 firstPhys;
102 	uint64 firstFlags;
103 	uint64 len;
104 
105 public:
106 	PageTableDumper()
107 		:
108 		firstVirt(0),
109 		firstPhys(0),
110 		firstFlags(0),
111 		len(0)
112 	{}
113 
114 	~PageTableDumper()
115 	{
116 		Write(0, 0, 0, 0);
117 	}
118 
119 	void Write(uint64_t virtAdr, uint64_t physAdr, size_t size, uint64 flags) {
120 		if (virtAdr == firstVirt + len && physAdr == firstPhys + len && flags == firstFlags) {
121 			len += size;
122 		} else {
123 			if (len != 0) {
124 				dprintf("  0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR,
125 				firstVirt, firstVirt + (len - 1));
126 				dprintf(": 0x%08" B_PRIxADDR " - 0x%08" B_PRIxADDR ", %#" B_PRIxADDR ", ",
127 					firstPhys, firstPhys + (len - 1), len);
128 				WritePteFlags(firstFlags); dprintf("\n");
129 			}
130 			firstVirt = virtAdr;
131 			firstPhys = physAdr;
132 			firstFlags = flags;
133 			len = size;
134 		}
135 	}
136 };
137 
138 
139 static void
140 DumpPageTableInt(Pte* pte, uint64_t virtAdr, uint32_t level, PageTableDumper& dumper)
141 {
142 	for (uint32 i = 0; i < pteCount; i++) {
143 		if (((1 << pteValid) & pte[i].flags) != 0) {
144 			if ((((1 << pteRead) | (1 << pteWrite)
145 					| (1 << pteExec)) & pte[i].flags) == 0) {
146 
147 				if (level == 0)
148 					kprintf("  internal page table on level 0\n");
149 
150 				DumpPageTableInt((Pte*)VirtFromPhys(B_PAGE_SIZE*pte[i].ppn),
151 					virtAdr + ((uint64_t)i << (pageBits + pteIdxBits * level)),
152 					level - 1, dumper);
153 			} else {
154 				dumper.Write(SignExtendVirtAdr(virtAdr
155 						+ ((uint64_t)i << (pageBits + pteIdxBits*level))),
156 					pte[i].ppn * B_PAGE_SIZE, 1 << (pageBits + pteIdxBits * level),
157 					pte[i].flags);
158 			}
159 		}
160 	}
161 }
162 
163 
164 static int
165 DumpPageTable(int argc, char** argv)
166 {
167 	int curArg = 1;
168 	SatpReg satp;
169 	bool isArea = false;
170 	addr_t base = 0;
171 	size_t size = 0;
172 
173 	satp.val = Satp();
174 	while (curArg < argc && argv[curArg][0] == '-') {
175 		if (strcmp(argv[curArg], "-team") == 0) {
176 			curArg++;
177 			team_id id = strtoul(argv[curArg++], NULL, 0);
178 			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
179 			if (addrSpace == NULL) {
180 				kprintf("could not find team %" B_PRId32 "\n", id);
181 				return 0;
182 			}
183 			satp.val = ((RISCV64VMTranslationMap*)
184 				addrSpace->TranslationMap())->Satp();
185 			isArea = false;
186 		} else if (strcmp(argv[curArg], "-area") == 0) {
187 			curArg++;
188 			uint64 areaId;
189 			if (!evaluate_debug_expression(argv[curArg++], &areaId, false))
190 				return 0;
191 			VMArea* area = VMAreas::Lookup((area_id)areaId);
192 			if (area == NULL) {
193 				kprintf("could not find area %" B_PRId32 "\n", (area_id)areaId);
194 				return 0;
195 			}
196 			satp.val = ((RISCV64VMTranslationMap*)
197 				area->address_space->TranslationMap())->Satp();
198 			base = area->Base();
199 			size = area->Size();
200 			kprintf("area %" B_PRId32 "(%s)\n", area->id, area->name);
201 				isArea = true;
202 		} else {
203 			kprintf("unknown flag \"%s\"\n", argv[curArg]);
204 			return 0;
205 		}
206 	}
207 
208 	kprintf("satp: %#" B_PRIx64 "\n", satp.val);
209 
210 	PageTableDumper dumper;
211 
212 	if (!isArea) {
213 		Pte* root = (Pte*)VirtFromPhys(satp.ppn * B_PAGE_SIZE);
214 		DumpPageTableInt(root, 0, 2, dumper);
215 	} else {
216 		for (; size > 0; base += B_PAGE_SIZE, size -= B_PAGE_SIZE) {
217 			Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, base);
218 			if (pte == NULL || (pte->flags & (1 << pteValid)) == 0)
219 				continue;
220 
221 			dumper.Write(base, pte->ppn * B_PAGE_SIZE, B_PAGE_SIZE, pte->flags);
222 		}
223 	}
224 
225 	return 0;
226 }
227 
228 
229 static int
230 DumpVirtPage(int argc, char** argv)
231 {
232 	int curArg = 1;
233 	SatpReg satp;
234 
235 	satp.val = Satp();
236 	while (curArg < argc && argv[curArg][0] == '-') {
237 		if (strcmp(argv[curArg], "-team") == 0) {
238 			curArg++;
239 			team_id id = strtoul(argv[curArg++], NULL, 0);
240 			VMAddressSpace* addrSpace = VMAddressSpace::DebugGet(id);
241 			if (addrSpace == NULL) {
242 				kprintf("could not find team %" B_PRId32 "\n", id);
243 				return 0;
244 			}
245 			satp.val = ((RISCV64VMTranslationMap*)
246 				addrSpace->TranslationMap())->Satp();
247 		} else {
248 			kprintf("unknown flag \"%s\"\n", argv[curArg]);
249 			return 0;
250 		}
251 	}
252 
253 	kprintf("satp: %#" B_PRIx64 "\n", satp.val);
254 
255 	uint64 virt = 0;
256 	if (!evaluate_debug_expression(argv[curArg++], &virt, false))
257 		return 0;
258 
259 	virt = ROUNDDOWN(virt, B_PAGE_SIZE);
260 
261 	Pte* pte = LookupPte(satp.ppn * B_PAGE_SIZE, virt);
262 	if (pte == NULL) {
263 		dprintf("not mapped\n");
264 		return 0;
265 	}
266 
267 	PageTableDumper dumper;
268 	dumper.Write(virt, pte->ppn * B_PAGE_SIZE, B_PAGE_SIZE, pte->flags);
269 
270 	return 0;
271 }
272 
273 
status_t
arch_vm_init(kernel_args *args)
{
	// Nothing architecture-specific to set up at this stage on RISC-V.
	return B_OK;
}
279 
280 
281 status_t
282 arch_vm_init_post_area(kernel_args *args)
283 {
284 	void* address = (void*)args->arch_args.physMap.start;
285 	area_id area = vm_create_null_area(VMAddressSpace::KernelID(),
286 		"physical map area", &address, B_EXACT_ADDRESS,
287 		args->arch_args.physMap.size, 0);
288 	if (area < B_OK)
289 		return area;
290 
291 	add_debugger_command("dump_page_table", &DumpPageTable, "Dump page table");
292 	add_debugger_command("dump_virt_page", &DumpVirtPage, "Dump virtual page mapping");
293 
294 	return B_OK;
295 }
296 
297 
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	// No architecture-specific work needed after modules are available.
	return B_OK;
}
303 
304 
status_t
arch_vm_init_end(kernel_args *args)
{
	// Turn the virtual ranges the boot loader asked us to keep into proper
	// kernel areas so the VM will not reuse or unmap them.
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: %#" B_PRIxSIZE "\n", (void*)range.start, range.size));

#if 1
		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		// NOTE(review): only the first page's physical address is looked
		// up, but range.size bytes are mapped from it — this assumes the
		// range is physically contiguous; confirm with the boot loader.
		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
#endif
	}

#if 0
	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

	return B_OK;
}
349 
350 
351 void
352 arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
353 {
354 	// This functions is only invoked when a userland thread is in the process
355 	// of dying. It switches to the kernel team and does whatever cleanup is
356 	// necessary (in case it is the team's main thread, it will delete the
357 	// team).
358 	// It is however not necessary to change the page directory. Userland team's
359 	// page directories include all kernel mappings as well. Furthermore our
360 	// arch specific translation map data objects are ref-counted, so they won't
361 	// go away as long as they are still used on any CPU.
362 
363 	SetSatp(((RISCV64VMTranslationMap*)to->TranslationMap())->Satp());
364 	FlushTlbAll();
365 }
366 
367 
bool
arch_vm_supports_protection(uint32 protection)
{
	// Every requested protection combination is accepted on this
	// architecture.
	return true;
}
373 
374 
void
arch_vm_unset_memory_type(VMArea *area)
{
	// Memory types are not tracked on this architecture; nothing to undo.
}
379 
380 
status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
	// Memory types are not implemented on this architecture; accept any
	// request as a no-op.
	return B_OK;
}
386