xref: /haiku/src/system/boot/platform/riscv/mmu.cpp (revision ed24eb5ff12640d052171c6a7feba37fab8a75d1)
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>
#include <AutoDeleter.h>

#include <OS.h>

#include <string.h>


struct MemoryRegion
{
	MemoryRegion* next;
	addr_t virtAdr;
	phys_addr_t physAdr;
	size_t size;
	uint32 protection;
};


// End of the boot loader's initial stack (defined outside this file); free
// physical memory is handed out starting right after it.
extern uint8 gStackEnd;

uint8* gMemBase = NULL;
size_t gTotalMem = 0;
uint8* gFreeMem = &gStackEnd;
addr_t gFreeVirtMem = KERNEL_LOAD_BASE;

MemoryRegion* sRegions = NULL;

ssize_t gVirtFromPhysOffset = 0;
phys_addr_t sPageTable = 0;


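// Log the given PTE flags as a readable list of flag names.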
static void
WritePteFlags(uint32 flags)
{
	bool first = true;
	dprintf("{");
	for (uint32 i = 0; i < 32; i++) {
		if ((1 << i) & flags) {
			if (first) first = false; else dprintf(", ");
			switch (i) {
			case pteValid:    dprintf("valid"); break;
			case pteRead:     dprintf("read"); break;
			case pteWrite:    dprintf("write"); break;
			case pteExec:     dprintf("exec"); break;
			case pteUser:     dprintf("user"); break;
			case pteGlobal:   dprintf("global"); break;
			case pteAccessed: dprintf("accessed"); break;
			case pteDirty:    dprintf("dirty"); break;
			default:          dprintf("%" B_PRIu32, i);
			}
		}
	}
	dprintf("}");
}


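// Bump allocator for physical pages: hands out page-aligned memory following
// the boot loader image and its stack. Returns 0 if the request does not fit
// into the remaining physical memory.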
static phys_addr_t
AllocPhysPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	phys_addr_t adr = ROUNDUP((addr_t)gFreeMem, B_PAGE_SIZE);

	if (adr + size - (addr_t)gMemBase > gTotalMem)
		return 0;

	gFreeMem = (uint8*)(adr + size);

	return adr;
}


static phys_addr_t
AllocPhysPage()
{
	return AllocPhysPages(B_PAGE_SIZE);
}


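// Only the most recently allocated range can actually be returned to the bump
// allocator; anything else stays allocated until the kernel takes over.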
static void
FreePhysPages(phys_addr_t physAdr, size_t size)
{
	if (physAdr + size == (phys_addr_t)gFreeMem)
		gFreeMem -= size;
}


// The same bump scheme for kernel virtual addresses, starting at
// KERNEL_LOAD_BASE.
static addr_t
AllocVirtPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	addr_t adr = ROUNDUP(gFreeVirtMem, B_PAGE_SIZE);
	gFreeVirtMem = adr + size;

	return adr;
}


static void
FreeVirtPages(addr_t virtAdr, size_t size)
{
	if (virtAdr + size == gFreeVirtMem)
		gFreeVirtMem -= size;
}


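// While the boot loader runs, physical and virtual addresses are identical,
// so these conversions are plain casts.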
static inline void*
VirtFromPhys(phys_addr_t physAdr)
{
	return (void*)physAdr;
}


static inline phys_addr_t
PhysFromVirt(void* virtAdr)
{
	return (phys_addr_t)virtAdr;
}


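// Walk the three-level Sv39 page table and return the leaf PTE for virtAdr.
// If `alloc` is true, missing intermediate tables are allocated and zeroed.
// Returns NULL on failure.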
static Pte*
LookupPte(addr_t virtAdr, bool alloc)
{
	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			pte->ppn = AllocPhysPage() / B_PAGE_SIZE;
			if (pte->ppn == 0)
				return NULL;
			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn), 0, B_PAGE_SIZE);
			pte->flags |= (1 << pteValid);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags)
{
	// dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ")\n", virtAdr, physAdr);
	Pte* pte = LookupPte(virtAdr, true);
	if (pte == NULL)
		panic("can't allocate page table");

	pte->ppn = physAdr / B_PAGE_SIZE;
	pte->flags = (1 << pteValid) | (1 << pteAccessed) | (1 << pteDirty) | flags;
}


static void
MapRange(addr_t virtAdr, phys_addr_t physAdr, size_t size, uint64 flags)
{
	dprintf("MapRange(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ", 0x%"
		B_PRIxADDR ", ", virtAdr, physAdr, size);
	WritePteFlags(flags);
	dprintf(")\n");
	for (size_t i = 0; i < size; i += B_PAGE_SIZE)
		Map(virtAdr + i, physAdr + i, flags);

	ASSERT_ALWAYS(insert_virtual_allocated_range(virtAdr, size) >= B_OK);
}


static void
MapRangeIdentity(addr_t adr, size_t size, uint64 flags)
{
	MapRange(adr, adr, size, flags);
}


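// Allocate a kernel virtual range for the given physical range, map it and
// record it in virtual_ranges_to_keep so the kernel preserves the mapping.
// range.start is rewritten in place to the new virtual address.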
static void
MapAddrRange(addr_range& range, uint64 flags)
{
	phys_addr_t physAdr = range.start;
	range.start = AllocVirtPages(range.size);

	MapRange(range.start, physAdr, range.size, flags);

	if (gKernelArgs.arch_args.num_virtual_ranges_to_keep
		>= MAX_VIRTUAL_RANGES_TO_KEEP)
		panic("too many virtual ranges to keep");

	gKernelArgs.arch_args.virtual_ranges_to_keep[
		gKernelArgs.arch_args.num_virtual_ranges_to_keep++] = range;
}


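// Allocate the second-level page tables for every root entry that covers
// kernel address space, so the kernel portion of the root table is fully
// populated before the kernel takes over.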
static void
PreallocKernelRange()
{
	Pte *root = (Pte*)VirtFromPhys(sPageTable);
	for (uint64 i = VirtAdrPte(KERNEL_BASE, 2); i <= VirtAdrPte(KERNEL_TOP, 2);
		i++) {
		Pte* pte = &root[i];
		pte->ppn = AllocPhysPage() / B_PAGE_SIZE;
		if (pte->ppn == 0)
			panic("can't alloc early physical page");
		memset(VirtFromPhys(B_PAGE_SIZE * pte->ppn), 0, B_PAGE_SIZE);
		pte->flags |= (1 << pteValid);
	}
}


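// Build the kernel page table: a linear mapping of physical memory at the top
// of the kernel address space, an identity mapping of the boot loader itself,
// the regions allocated via platform_allocate_region(), and the memory-mapped
// device registers.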
static void
SetupPageTable()
{
	sPageTable = AllocPhysPage();
	memset(VirtFromPhys(sPageTable), 0, B_PAGE_SIZE);

	PreallocKernelRange();

	// Physical memory mapping
	gKernelArgs.arch_args.physMap.size
		= gKernelArgs.physical_memory_range[0].size;
	gKernelArgs.arch_args.physMap.start = KERNEL_TOP + 1
		- gKernelArgs.arch_args.physMap.size;
	MapRange(gKernelArgs.arch_args.physMap.start,
		gKernelArgs.physical_memory_range[0].start,
		gKernelArgs.arch_args.physMap.size,
		(1 << pteRead) | (1 << pteWrite));

	// Boot loader
	MapRangeIdentity((addr_t)gMemBase, &gStackEnd - gMemBase,
		(1 << pteRead) | (1 << pteWrite) | (1 << pteExec));

	// Memory regions
	MemoryRegion* region;
	for (region = sRegions; region != NULL; region = region->next) {
		uint64 flags = 0;
		if ((region->protection & B_READ_AREA) != 0)
			flags |= (1 << pteRead);
		if ((region->protection & B_WRITE_AREA) != 0)
			flags |= (1 << pteWrite);
		if ((region->protection & B_EXECUTE_AREA) != 0)
			flags |= (1 << pteExec);
		MapRange(region->virtAdr, region->physAdr, region->size, flags);
	}

	// Devices
	MapAddrRange(gKernelArgs.arch_args.clint, (1 << pteRead) | (1 << pteWrite));
	MapAddrRange(gKernelArgs.arch_args.htif, (1 << pteRead) | (1 << pteWrite));
	MapAddrRange(gKernelArgs.arch_args.plic, (1 << pteRead) | (1 << pteWrite));
	if (strcmp(gKernelArgs.arch_args.uart.kind, "") != 0) {
		MapAddrRange(gKernelArgs.arch_args.uart.regs,
			(1 << pteRead) | (1 << pteWrite));
	}
}


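// Compose the satp value the kernel will load: Sv39 mode, ASID 0, and the PPN
// of the root page table.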
static uint64
GetSatp()
{
	return SatpReg{
		.ppn = sPageTable / B_PAGE_SIZE,
		.asid = 0,
		.mode = satpModeSv39
	}.val;
}


//	#pragma mark -

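// Reserve a physical and a kernel virtual range for a region. The loader keeps
// accessing it through the physical address returned in *address; the kernel
// mapping at virtAdr is created later by SetupPageTable().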
283 extern "C" status_t
284 platform_allocate_region(void** address, size_t size, uint8 protection,
285 	bool exactAddress)
286 {
287 	size = ROUNDUP(size, B_PAGE_SIZE);
288 
289 	if (exactAddress)
290 		return B_ERROR;
291 
292 	ObjectDeleter<MemoryRegion> region(new(std::nothrow) MemoryRegion());
293 	if (!region.IsSet())
294 		return B_NO_MEMORY;
295 
296 	region->physAdr = AllocPhysPages(size);
297 	if (region->physAdr == 0)
298 		return B_NO_MEMORY;
299 
300 	region->virtAdr = AllocVirtPages(size);
301 	region->size = size;
302 	region->protection = protection;
303 
304 	*address = (void*)region->physAdr;
305 
306 	region->next = sRegions;
307 	sRegions = region.Detach();
308 
309 	return B_OK;
310 }
311 
312 
313 extern "C" status_t
314 platform_free_region(void* address, size_t size)
315 {
316 	MemoryRegion* prev = NULL;
317 	MemoryRegion* region = sRegions;
318 	while (region != NULL && !(region->physAdr == (phys_addr_t)address)) {
319 		prev = region;
320 		region = region->next;
321 	}
322 	if (region == NULL) {
323 		panic("platform_free_region: address %p is not allocated\n", address);
324 		return B_ERROR;
325 	}
326 	FreePhysPages(region->physAdr, region->size);
327 	FreeVirtPages(region->virtAdr, region->size);
328 	if (prev == NULL)
329 		sRegions = region->next;
330 	else
331 		prev->next = region->next;
332 
333 	delete region;
334 
335 	return B_OK;
336 }
337 
338 
void
platform_release_heap(struct stage2_args* args, void* base)
{
}


status_t
platform_init_heap(struct stage2_args* args, void** _base, void** _top)
{
	addr_t heap = AllocPhysPages(args->heap_size);
	if (heap == 0)
		return B_NO_MEMORY;

	*_base = (void*)heap;
	*_top = (void*)(heap + args->heap_size);
	return B_OK;
}


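// Translate a loader (physical) address inside an allocated region to the
// kernel virtual address it will have once the kernel's mappings are active.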
status_t
platform_bootloader_address_to_kernel_address(void* address, addr_t* result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !((phys_addr_t)address >= region->physAdr
		&& (phys_addr_t)address < region->physAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (addr_t)address - region->physAdr + region->virtAdr;
	return B_OK;
}


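// The reverse translation: map a kernel virtual address back to the physical
// address the loader can access.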
status_t
platform_kernel_address_to_bootloader_address(addr_t address, void** result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !((phys_addr_t)address >= region->virtAdr
		&& (phys_addr_t)address < region->virtAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (void*)(address - region->virtAdr + region->physAdr);
	return B_OK;
}


//	#pragma mark -

void
mmu_init(void)
{
}


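// Build everything the kernel needs for its address space: allocate the boot
// CPU's kernel stack, record the physical memory ranges, construct the page
// table and hand the resulting satp value back to the caller.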
void
mmu_init_for_kernel(addr_t& satp)
{
	// map in a kernel stack
	void* stack_address = NULL;
	if (platform_allocate_region(&stack_address,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_READ_AREA | B_WRITE_AREA, false)
		!= B_OK) {
		panic("Unable to allocate a stack");
	}
	gKernelArgs.cpu_kstack[0].start = fix_address((addr_t)stack_address);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	dprintf("Kernel stack at %#lx\n", gKernelArgs.cpu_kstack[0].start);

	gKernelArgs.num_physical_memory_ranges = 0;
	insert_physical_memory_range((addr_t)gMemBase, gTotalMem);

	gKernelArgs.num_virtual_allocated_ranges = 0;
	gKernelArgs.arch_args.num_virtual_ranges_to_keep = 0;

	SetupPageTable();
	satp = GetSatp();
	dprintf("satp: %#" B_PRIx64 "\n", satp);

	gKernelArgs.num_physical_allocated_ranges = 0;
	insert_physical_allocated_range((addr_t)gMemBase, gFreeMem - gMemBase);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);
}
429