xref: /haiku/src/system/boot/platform/riscv/mmu.cpp (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>
#include <AutoDeleter.h>

#include <OS.h>

#include <string.h>


struct MemoryRegion
{
	MemoryRegion* next;
	addr_t virtAdr;
	phys_addr_t physAdr;
	size_t size;
	uint32 protection;
};


extern uint8 gStackEnd;

uint8* gMemBase = NULL;
size_t gTotalMem = 0;
uint8* gFreeMem = &gStackEnd;
addr_t gFreeVirtMem = KERNEL_LOAD_BASE;

MemoryRegion* sRegions = NULL;

ssize_t gVirtFromPhysOffset = 0;
phys_addr_t sPageTable = 0;


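// Print the PTE flag bits in `flags` as a readable list such as
// "{valid, read, write}".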
static void
WritePteFlags(uint32 flags)
{
	bool first = true;
	dprintf("{");
	for (uint32 i = 0; i < 32; i++) {
		if ((1 << i) & flags) {
			if (first) first = false; else dprintf(", ");
			switch (i) {
			case 0:  dprintf("valid"); break;
			case 1:  dprintf("read"); break;
			case 2:  dprintf("write"); break;
			case 3:  dprintf("exec"); break;
			case 4:  dprintf("user"); break;
			case 5:  dprintf("global"); break;
			case 6:  dprintf("accessed"); break;
			case 7:  dprintf("dirty"); break;
			default: dprintf("%" B_PRIu32, i);
			}
		}
	}
	dprintf("}");
}


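// Bump allocator for physical memory: carve `size` bytes (rounded up to whole
// pages) out of the free memory following the boot loader stack. Returns 0 if
// the request would exceed the available memory.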
static phys_addr_t
AllocPhysPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	phys_addr_t adr = ROUNDUP((addr_t)gFreeMem, B_PAGE_SIZE);

	if (adr + size - (addr_t)gMemBase > gTotalMem)
		return 0;

	gFreeMem = (uint8*)(adr + size);

	return adr;
}


static phys_addr_t
AllocPhysPage()
{
	return AllocPhysPages(B_PAGE_SIZE);
}


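// Only the most recently allocated physical pages can actually be reclaimed;
// anything else stays allocated until the kernel takes over.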
static void
FreePhysPages(phys_addr_t physAdr, size_t size)
{
	if (physAdr + size == (phys_addr_t)gFreeMem)
		gFreeMem -= size;
}


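// Bump allocator for kernel virtual addresses, starting at KERNEL_LOAD_BASE.
// As with physical pages, only the most recent allocation can be freed.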
static phys_addr_t
AllocVirtPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	phys_addr_t adr = ROUNDUP(gFreeVirtMem, B_PAGE_SIZE);
	gFreeVirtMem = adr + size;

	return adr;
}


static void
FreeVirtPages(addr_t virtAdr, size_t size)
{
	if (virtAdr + size == gFreeVirtMem)
		gFreeVirtMem -= size;
}


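// The boot loader runs with address translation disabled, so physical memory
// is directly addressable and both conversions are plain casts.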
static inline void*
VirtFromPhys(phys_addr_t physAdr)
{
	return (void*)physAdr;
}


static inline phys_addr_t
PhysFromVirt(void* virtAdr)
{
	return (phys_addr_t)virtAdr;
}


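// Walk the three-level Sv39 page table and return a pointer to the leaf PTE
// for virtAdr. If `alloc` is true, missing intermediate tables are allocated
// and zeroed; returns NULL if a table is missing or cannot be allocated.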
static Pte*
LookupPte(addr_t virtAdr, bool alloc)
{
	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!pte->isValid) {
			if (!alloc)
				return NULL;
			uint64 ppn = AllocPhysPage() / B_PAGE_SIZE;
			if (ppn == 0)
				return NULL;
			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * ppn), 0, B_PAGE_SIZE);
			Pte newPte {
				.isValid = true,
				.isGlobal = IS_KERNEL_ADDRESS(virtAdr),
				.ppn = ppn
			};
			pte->val = newPte.val;
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


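// Map a single page at virtAdr to physAdr, marking the entry valid, accessed
// and dirty and adding the extra PTE bits passed in `flags`.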
static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags)
{
	// dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ")\n", virtAdr, physAdr);
	Pte* pte = LookupPte(virtAdr, true);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte {
		.isValid = true,
		.isGlobal = IS_KERNEL_ADDRESS(virtAdr),
		.isAccessed = true,
		.isDirty = true,
		.ppn = physAdr / B_PAGE_SIZE
	};
	newPte.val |= flags;

	pte->val = newPte.val;
}


static void
MapRange(addr_t virtAdr, phys_addr_t physAdr, size_t size, uint64 flags)
{
	dprintf("MapRange(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", ", virtAdr, physAdr, size);
	WritePteFlags(flags);
	dprintf(")\n");
	for (size_t i = 0; i < size; i += B_PAGE_SIZE)
		Map(virtAdr + i, physAdr + i, flags);

	ASSERT_ALWAYS(insert_virtual_allocated_range(virtAdr, size) >= B_OK);
}


static void
MapRangeIdentity(addr_t adr, size_t size, uint64 flags)
{
	MapRange(adr, adr, size, flags);
}


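// Allocate kernel virtual addresses for the given physical range, map it,
// rewrite `range` to the resulting virtual range and record it in the list of
// ranges the kernel must keep mapped.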
static void
MapAddrRange(addr_range& range, uint64 flags)
{
	phys_addr_t physAdr = range.start;
	range.start = AllocVirtPages(range.size);

	MapRange(range.start, physAdr, range.size, flags);

	if (gKernelArgs.arch_args.num_virtual_ranges_to_keep
		>= MAX_VIRTUAL_RANGES_TO_KEEP)
		panic("too many virtual ranges to keep");

	gKernelArgs.arch_args.virtual_ranges_to_keep[
		gKernelArgs.arch_args.num_virtual_ranges_to_keep++] = range;
}


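// Fill every top-level PTE covering the kernel address space (KERNEL_BASE up
// to KERNEL_TOP) with an empty, global second-level table, so that later
// kernel mappings only ever fill in already existing tables.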
static void
PreallocKernelRange()
{
	Pte *root = (Pte*)VirtFromPhys(sPageTable);
	for (uint64 i = VirtAdrPte(KERNEL_BASE, 2); i <= VirtAdrPte(KERNEL_TOP, 2);
		i++) {
		Pte* pte = &root[i];
		uint64 ppn = AllocPhysPage() / B_PAGE_SIZE;
		if (ppn == 0) panic("can't alloc early physical page");
		memset(VirtFromPhys(B_PAGE_SIZE * ppn), 0, B_PAGE_SIZE);
		Pte newPte {
			.isValid = true,
			.isGlobal = true,
			.ppn = ppn
		};
		pte->val = newPte.val;
	}
}


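// Build the initial Sv39 page table the kernel is entered with: the linear
// physical memory mapping, an identity mapping of the boot loader itself, all
// regions allocated via platform_allocate_region(), and the MMIO ranges of
// the CLINT, HTIF, PLIC and (if configured) UART.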
static void
SetupPageTable()
{
	sPageTable = AllocPhysPage();
	memset(VirtFromPhys(sPageTable), 0, B_PAGE_SIZE);

	PreallocKernelRange();

	// Physical memory mapping
	gKernelArgs.arch_args.physMap.size
		= gKernelArgs.physical_memory_range[0].size;
	gKernelArgs.arch_args.physMap.start = KERNEL_TOP + 1
		- gKernelArgs.arch_args.physMap.size;
	MapRange(gKernelArgs.arch_args.physMap.start,
		gKernelArgs.physical_memory_range[0].start,
		gKernelArgs.arch_args.physMap.size,
		Pte {.isRead = true, .isWrite = true}.val);

	// Boot loader
	MapRangeIdentity((addr_t)gMemBase, &gStackEnd - gMemBase,
		Pte {.isRead = true, .isWrite = true, .isExec = true}.val);

	// Memory regions
	MemoryRegion* region;
	for (region = sRegions; region != NULL; region = region->next) {
		Pte flags {
			.isRead  = (region->protection & B_READ_AREA)    != 0,
			.isWrite = (region->protection & B_WRITE_AREA)   != 0,
			.isExec  = (region->protection & B_EXECUTE_AREA) != 0
		};
		MapRange(region->virtAdr, region->physAdr, region->size, flags.val);
	}

	// Devices
	MapAddrRange(gKernelArgs.arch_args.clint,
		Pte {.isRead = true, .isWrite = true}.val);
	MapAddrRange(gKernelArgs.arch_args.htif,
		Pte {.isRead = true, .isWrite = true}.val);
	MapAddrRange(gKernelArgs.arch_args.plic,
		Pte {.isRead = true, .isWrite = true}.val);
	if (strcmp(gKernelArgs.arch_args.uart.kind, "") != 0) {
		MapAddrRange(gKernelArgs.arch_args.uart.regs,
			Pte {.isRead = true, .isWrite = true}.val);
	}
}


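// Compose the satp value the kernel has to install: the root table's physical
// page number, ASID 0 and Sv39 translation mode.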
static uint64
GetSatp()
{
	return SatpReg{
		.ppn = sPageTable / B_PAGE_SIZE,
		.asid = 0,
		.mode = satpModeSv39
	}.val;
}


//	#pragma mark -

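// Allocate a region backed by contiguous physical pages and reserve a kernel
// virtual range for it. The address returned to the caller is the physical
// one, since the loader still runs untranslated; the virtual mapping is only
// created later in SetupPageTable().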
extern "C" status_t
platform_allocate_region(void** address, size_t size, uint8 protection,
	bool exactAddress)
{
	size = ROUNDUP(size, B_PAGE_SIZE);

	if (exactAddress)
		return B_ERROR;

	ObjectDeleter<MemoryRegion> region(new(std::nothrow) MemoryRegion());
	if (!region.IsSet())
		return B_NO_MEMORY;

	region->physAdr = AllocPhysPages(size);
	if (region->physAdr == 0)
		return B_NO_MEMORY;

	region->virtAdr = AllocVirtPages(size);
	region->size = size;
	region->protection = protection;

	*address = (void*)region->physAdr;

	region->next = sRegions;
	sRegions = region.Detach();

	return B_OK;
}


extern "C" status_t
platform_free_region(void* address, size_t size)
{
	MemoryRegion* prev = NULL;
	MemoryRegion* region = sRegions;
	while (region != NULL && !(region->physAdr == (phys_addr_t)address)) {
		prev = region;
		region = region->next;
	}
	if (region == NULL) {
		panic("platform_free_region: address %p is not allocated\n", address);
		return B_ERROR;
	}
	FreePhysPages(region->physAdr, region->size);
	FreeVirtPages(region->virtAdr, region->size);
	if (prev == NULL)
		sRegions = region->next;
	else
		prev->next = region->next;

	delete region;

	return B_OK;
}


ssize_t
platform_allocate_heap_region(size_t size, void **_base)
{
	addr_t heap = AllocPhysPages(size);
	if (heap == 0)
		return B_NO_MEMORY;

	*_base = (void*)heap;
	return size;
}


void
platform_free_heap_region(void *_base, size_t size)
{
}


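// Translate between the loader (physical) addresses of allocated regions and
// the kernel virtual addresses they will be mapped at.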
status_t
platform_bootloader_address_to_kernel_address(void* address, addr_t* result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !((phys_addr_t)address >= region->physAdr
		&& (phys_addr_t)address < region->physAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (addr_t)address - region->physAdr + region->virtAdr;
	return B_OK;
}


status_t
platform_kernel_address_to_bootloader_address(addr_t address, void** result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !((phys_addr_t)address >= region->virtAdr
		&& (phys_addr_t)address < region->virtAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (void*)(address - region->virtAdr + region->physAdr);
	return B_OK;
}


//	#pragma mark -

void
mmu_init(void)
{
}


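// Prepare everything the kernel needs before the MMU is enabled: allocate its
// boot stack, record physical memory and allocation ranges in gKernelArgs,
// build the page table and pass the satp value to install back to the caller.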
void
mmu_init_for_kernel(addr_t& satp)
{
	// map in a kernel stack
	void* stack_address = NULL;
	if (platform_allocate_region(&stack_address,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_READ_AREA | B_WRITE_AREA, false)
		!= B_OK) {
		panic("Unable to allocate a stack");
	}
	gKernelArgs.cpu_kstack[0].start = fix_address((addr_t)stack_address);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	dprintf("Kernel stack at %#lx\n", gKernelArgs.cpu_kstack[0].start);

	gKernelArgs.num_physical_memory_ranges = 0;
	insert_physical_memory_range((addr_t)gMemBase, gTotalMem);

	gKernelArgs.num_virtual_allocated_ranges = 0;
	gKernelArgs.arch_args.num_virtual_ranges_to_keep = 0;

	SetupPageTable();
	satp = GetSatp();
	dprintf("satp: %#" B_PRIx64 "\n", satp);

	gKernelArgs.num_physical_allocated_ranges = 0;
	insert_physical_allocated_range((addr_t)gMemBase, gFreeMem - gMemBase);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);
}
444