xref: /haiku/src/system/boot/platform/riscv/mmu.cpp (revision 0d07b1d98a7c4595b39324f0603d3b3005ad38d9)
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>
#include <AutoDeleter.h>

#include <OS.h>

#include <string.h>


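// A chunk of memory allocated by the boot loader. Regions are kept in a
// singly linked list (sRegions) and are later mapped into the kernel address
// space by SetupPageTable().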
struct MemoryRegion
{
	MemoryRegion* next;
	addr_t virtAdr;
	phys_addr_t physAdr;
	size_t size;
	uint32 protection;
};


extern uint8 gStackEnd;

uint8* gMemBase = NULL;
size_t gTotalMem = 0;
uint8* gFreeMem = &gStackEnd;
addr_t gFreeVirtMem = KERNEL_LOAD_BASE;

MemoryRegion* sRegions = NULL;

ssize_t gVirtFromPhysOffset = 0;
phys_addr_t sPageTable = 0;


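// Print a PTE flag mask in human readable form, e.g. "{valid, read, write}".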
static void
WritePteFlags(uint32 flags)
{
	bool first = true;
	dprintf("{");
	for (uint32 i = 0; i < 32; i++) {
		if ((1 << i) & flags) {
			if (first)
				first = false;
			else
				dprintf(", ");
			switch (i) {
			case pteValid:    dprintf("valid"); break;
			case pteRead:     dprintf("read"); break;
			case pteWrite:    dprintf("write"); break;
			case pteExec:     dprintf("exec"); break;
			case pteUser:     dprintf("user"); break;
			case pteGlobal:   dprintf("global"); break;
			case pteAccessed: dprintf("accessed"); break;
			case pteDirty:    dprintf("dirty"); break;
			default:          dprintf("%" B_PRIu32, i);
			}
		}
	}
	dprintf("}");
}


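// Bump allocator for physical pages, starting right after the boot loader's
// stack. Returns 0 if the requested size exceeds the remaining memory.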
static phys_addr_t
AllocPhysPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	phys_addr_t adr = ROUNDUP((addr_t)gFreeMem, B_PAGE_SIZE);

	if (adr + size - (addr_t)gMemBase > gTotalMem)
		return 0;

	gFreeMem = (uint8*)(adr + size);

	return adr;
}


static phys_addr_t
AllocPhysPage()
{
	return AllocPhysPages(B_PAGE_SIZE);
}


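// Only the most recent allocation can be reclaimed; anything else stays
// allocated until the kernel takes over memory management.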
static void
FreePhysPages(phys_addr_t physAdr, size_t size)
{
	if (physAdr + size == (phys_addr_t)gFreeMem)
		gFreeMem -= size;
}


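// Reserve a range of kernel virtual address space, starting at
// KERNEL_LOAD_BASE. This only reserves addresses; the actual mapping is
// created in SetupPageTable().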
static addr_t
AllocVirtPages(size_t size)
{
	size = ROUNDUP(size, B_PAGE_SIZE);
	addr_t adr = ROUNDUP(gFreeVirtMem, B_PAGE_SIZE);
	gFreeVirtMem = adr + size;

	return adr;
}


static void
FreeVirtPages(addr_t virtAdr, size_t size)
{
	if (virtAdr + size == gFreeVirtMem)
		gFreeVirtMem -= size;
}


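// The boot loader runs with translation disabled, so physical and virtual
// addresses coincide and both conversions are identity operations.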
static inline void*
VirtFromPhys(phys_addr_t physAdr)
{
	return (void*)physAdr;
}


static inline phys_addr_t
PhysFromVirt(void* virtAdr)
{
	return (phys_addr_t)virtAdr;
}


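// Walk the three-level Sv39 page table and return the leaf PTE for virtAdr.
// If alloc is true, missing intermediate page tables are allocated and
// zeroed; otherwise NULL is returned for unmapped addresses.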
static Pte*
LookupPte(addr_t virtAdr, bool alloc)
{
	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!((1 << pteValid) & pte->flags)) {
			if (!alloc)
				return NULL;
			pte->ppn = AllocPhysPage() / B_PAGE_SIZE;
			if (pte->ppn == 0)
				return NULL;
			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn), 0,
				B_PAGE_SIZE);
			pte->flags |= (1 << pteValid)
				| (IS_KERNEL_ADDRESS(virtAdr) ? (1 << pteGlobal) : 0);
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


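// Map a single page. The accessed and dirty bits are preset because RISC-V
// implementations may otherwise fault on the first access to the page.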
static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags)
{
	// dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxADDR ")\n", virtAdr, physAdr);
	Pte* pte = LookupPte(virtAdr, true);
	if (pte == NULL)
		panic("can't allocate page table");

	pte->ppn = physAdr / B_PAGE_SIZE;
	pte->flags = (1 << pteValid) | (1 << pteAccessed) | (1 << pteDirty)
		| (IS_KERNEL_ADDRESS(virtAdr) ? (1 << pteGlobal) : 0)
		| flags;
}


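// Map a contiguous physical range page by page and record it in the kernel
// args as a virtually allocated range.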
static void
MapRange(addr_t virtAdr, phys_addr_t physAdr, size_t size, uint64 flags)
{
	dprintf("MapRange(0x%" B_PRIxADDR ", 0x%" B_PRIxPHYSADDR ", 0x%"
		B_PRIxSIZE ", ", virtAdr, physAdr, size);
	WritePteFlags(flags);
	dprintf(")\n");
	for (size_t i = 0; i < size; i += B_PAGE_SIZE)
		Map(virtAdr + i, physAdr + i, flags);

	ASSERT_ALWAYS(insert_virtual_allocated_range(virtAdr, size) >= B_OK);
}


static void
MapRangeIdentity(addr_t adr, size_t size, uint64 flags)
{
	MapRange(adr, adr, size, flags);
}


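// Allocate a virtual range for a device's physical register range, map it,
// and record it in virtual_ranges_to_keep so the kernel preserves the
// mapping. The range's start is updated to the new virtual address.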
static void
MapAddrRange(addr_range& range, uint64 flags)
{
	phys_addr_t physAdr = range.start;
	range.start = AllocVirtPages(range.size);

	MapRange(range.start, physAdr, range.size, flags);

	if (gKernelArgs.arch_args.num_virtual_ranges_to_keep
		>= MAX_VIRTUAL_RANGES_TO_KEEP)
		panic("too many virtual ranges to keep");

	gKernelArgs.arch_args.virtual_ranges_to_keep[
		gKernelArgs.arch_args.num_virtual_ranges_to_keep++] = range;
}


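// Preallocate all top-level page table entries covering the kernel address
// space, so that the kernel part of the page table can later be shared
// between address spaces without updating top-level entries.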
static void
PreallocKernelRange()
{
	Pte *root = (Pte*)VirtFromPhys(sPageTable);
	for (uint64 i = VirtAdrPte(KERNEL_BASE, 2); i <= VirtAdrPte(KERNEL_TOP, 2);
		i++) {
		Pte* pte = &root[i];
		pte->ppn = AllocPhysPage() / B_PAGE_SIZE;
		if (pte->ppn == 0)
			panic("can't alloc early physical page");
		memset(VirtFromPhys(B_PAGE_SIZE * pte->ppn), 0, B_PAGE_SIZE);
		pte->flags |= (1 << pteValid) | (1 << pteGlobal);
	}
}


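// Build the Sv39 page table the kernel is entered with: a linear mapping of
// all physical memory at the top of the address space, an identity mapping
// of the boot loader itself, all regions allocated through
// platform_allocate_region(), and the device register ranges.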
static void
SetupPageTable()
{
	sPageTable = AllocPhysPage();
	memset(VirtFromPhys(sPageTable), 0, B_PAGE_SIZE);

	PreallocKernelRange();

	// Physical memory mapping
	gKernelArgs.arch_args.physMap.size
		= gKernelArgs.physical_memory_range[0].size;
	gKernelArgs.arch_args.physMap.start = KERNEL_TOP + 1
		- gKernelArgs.arch_args.physMap.size;
	MapRange(gKernelArgs.arch_args.physMap.start,
		gKernelArgs.physical_memory_range[0].start,
		gKernelArgs.arch_args.physMap.size,
		(1 << pteRead) | (1 << pteWrite));

	// Boot loader
	MapRangeIdentity((addr_t)gMemBase, &gStackEnd - gMemBase,
		(1 << pteRead) | (1 << pteWrite) | (1 << pteExec));

	// Memory regions
	MemoryRegion* region;
	for (region = sRegions; region != NULL; region = region->next) {
		uint64 flags = 0;
		if ((region->protection & B_READ_AREA) != 0)
			flags |= (1 << pteRead);
		if ((region->protection & B_WRITE_AREA) != 0)
			flags |= (1 << pteWrite);
		if ((region->protection & B_EXECUTE_AREA) != 0)
			flags |= (1 << pteExec);
		MapRange(region->virtAdr, region->physAdr, region->size, flags);
	}

	// Devices
	MapAddrRange(gKernelArgs.arch_args.clint, (1 << pteRead) | (1 << pteWrite));
	MapAddrRange(gKernelArgs.arch_args.htif, (1 << pteRead) | (1 << pteWrite));
	MapAddrRange(gKernelArgs.arch_args.plic, (1 << pteRead) | (1 << pteWrite));
	if (strcmp(gKernelArgs.arch_args.uart.kind, "") != 0) {
		MapAddrRange(gKernelArgs.arch_args.uart.regs,
			(1 << pteRead) | (1 << pteWrite));
	}
}


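// Compose the satp value that points the MMU at the page table built above
// and selects Sv39 translation mode.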
static uint64
GetSatp()
{
	return SatpReg{
		.ppn = sPageTable / B_PAGE_SIZE,
		.asid = 0,
		.mode = satpModeSv39
	}.val;
}


//	#pragma mark -

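// Allocate physical pages for a region and reserve a kernel virtual range
// for it. The boot loader still runs identity mapped, so the address handed
// back to the caller is the physical one; the virtual address only becomes
// valid once the kernel enables translation.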
extern "C" status_t
platform_allocate_region(void** address, size_t size, uint8 protection,
	bool exactAddress)
{
	size = ROUNDUP(size, B_PAGE_SIZE);

	if (exactAddress)
		return B_ERROR;

	ObjectDeleter<MemoryRegion> region(new(std::nothrow) MemoryRegion());
	if (!region.IsSet())
		return B_NO_MEMORY;

	region->physAdr = AllocPhysPages(size);
	if (region->physAdr == 0)
		return B_NO_MEMORY;

	region->virtAdr = AllocVirtPages(size);
	region->size = size;
	region->protection = protection;

	*address = (void*)region->physAdr;

	region->next = sRegions;
	sRegions = region.Detach();

	return B_OK;
}


extern "C" status_t
platform_free_region(void* address, size_t size)
{
	MemoryRegion* prev = NULL;
	MemoryRegion* region = sRegions;
	while (region != NULL && region->physAdr != (phys_addr_t)address) {
		prev = region;
		region = region->next;
	}
	if (region == NULL) {
		panic("platform_free_region: address %p is not allocated\n", address);
		return B_ERROR;
	}
	FreePhysPages(region->physAdr, region->size);
	FreeVirtPages(region->virtAdr, region->size);
	if (prev == NULL)
		sRegions = region->next;
	else
		prev->next = region->next;

	delete region;

	return B_OK;
}


void
platform_release_heap(struct stage2_args* args, void* base)
{
}


status_t
platform_init_heap(struct stage2_args* args, void** _base, void** _top)
{
	addr_t heap = AllocPhysPages(args->heap_size);
	if (heap == 0)
		return B_NO_MEMORY;

	*_base = (void*)heap;
	*_top = (void*)(heap + args->heap_size);
	return B_OK;
}


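// Translate between the identity-mapped addresses used by the boot loader
// and the kernel virtual addresses the regions will be mapped at, by
// searching the recorded region list.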
status_t
platform_bootloader_address_to_kernel_address(void* address, addr_t* result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !((phys_addr_t)address >= region->physAdr
		&& (phys_addr_t)address < region->physAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (addr_t)address - region->physAdr + region->virtAdr;
	return B_OK;
}


status_t
platform_kernel_address_to_bootloader_address(addr_t address, void** result)
{
	MemoryRegion* region = sRegions;
	while (region != NULL && !(address >= region->virtAdr
		&& address < region->virtAdr + region->size))
		region = region->next;

	if (region == NULL)
		return B_ERROR;

	*result = (void*)(address - region->virtAdr + region->physAdr);
	return B_OK;
}


//	#pragma mark -

void
mmu_init(void)
{
}


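// Prepare the final memory layout for the kernel: allocate its stack,
// publish the physical memory ranges, build the page table and return the
// satp value that the kernel entry code will load.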
void
mmu_init_for_kernel(addr_t& satp)
{
	// map in a kernel stack
	void* stack_address = NULL;
	if (platform_allocate_region(&stack_address,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_READ_AREA | B_WRITE_AREA, false)
		!= B_OK) {
		panic("Unable to allocate a stack");
	}
	gKernelArgs.cpu_kstack[0].start = fix_address((addr_t)stack_address);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	dprintf("Kernel stack at %#lx\n", gKernelArgs.cpu_kstack[0].start);

	gKernelArgs.num_physical_memory_ranges = 0;
	insert_physical_memory_range((addr_t)gMemBase, gTotalMem);

	gKernelArgs.num_virtual_allocated_ranges = 0;
	gKernelArgs.arch_args.num_virtual_ranges_to_keep = 0;

	SetupPageTable();
	satp = GetSatp();
	dprintf("satp: %#" B_PRIx64 "\n", satp);

	gKernelArgs.num_physical_allocated_ranges = 0;
	insert_physical_allocated_range((addr_t)gMemBase, gFreeMem - gMemBase);

	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);
}
431