/*
 * Copyright 2007-2010, François Revol, revol@free.fr.
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights
 *   reserved.
 * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <new>
#include <string.h>

#include <arch_cpu_defs.h>
#include <boot/kernel_args.h>
#include <KernelExport.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <Clint.h>
#include <Htif.h>
#include <Plic.h>

#include "RISCV64VMTranslationMap.h"


#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


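// Offset between the kernel's physical memory map area and physical memory
// itself, set up in arch_vm_translation_map_init() so that VirtFromPhys()
// can turn a physical address into a directly accessible kernel virtual one.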
ssize_t gVirtFromPhysOffset = 0;

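// Physical address of the root (level 2) kernel page table, taken over from
// the boot loader via the SATP register in arch_vm_translation_map_init().
// The physical page mapper is constructed into the static buffer below with
// placement new, apparently because this hook runs before the kernel heap
// is usable.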
phys_addr_t sPageTable = 0;
char sPhysicalPageMapperData[sizeof(RISCV64VMPhysicalPageMapper)];


// TODO: Consolidate these functions with RISCV64VMTranslationMap.

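// Walk the kernel page table and return a pointer to the level 0 PTE for
// virtAdr. If alloc is true, missing intermediate page tables are allocated
// with get_free_page() and zero-filled; otherwise the walk bails out with
// NULL at the first non-present entry. The three-level walk (levels 2..0)
// matches the Sv39 paging mode used here.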
static Pte*
LookupPte(addr_t virtAdr, bool alloc, kernel_args* args,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	Pte *pte = (Pte*)VirtFromPhys(sPageTable);
	for (int level = 2; level > 0; level--) {
		pte += VirtAdrPte(virtAdr, level);
		if (!pte->isValid) {
			if (!alloc)
				return NULL;
			page_num_t ppn = get_free_page(args);
			if (ppn == 0)
				return NULL;
			memset((Pte*)VirtFromPhys(B_PAGE_SIZE * ppn), 0, B_PAGE_SIZE);
			Pte newPte {
				.isValid = true,
				.isGlobal = true,
				.ppn = ppn
			};
			pte->val = newPte.val;
		}
		pte = (Pte*)VirtFromPhys(B_PAGE_SIZE * pte->ppn);
	}
	pte += VirtAdrPte(virtAdr, 0);
	return pte;
}


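// Insert a single kernel mapping from virtAdr to physAdr, merging in any
// extra PTE bits passed via flags. The accessed and dirty bits are pre-set
// so the hardware never has to fault to update them, and the TLB entry for
// the address is invalidated afterwards.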
static void
Map(addr_t virtAdr, phys_addr_t physAdr, uint64 flags, kernel_args* args,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	// dprintf("Map(0x%" B_PRIxADDR ", 0x%" B_PRIxPHYSADDR ")\n", virtAdr, physAdr);
	Pte* pte = LookupPte(virtAdr, true, args, get_free_page);
	if (pte == NULL)
		panic("can't allocate page table");

	Pte newPte {
		.isValid = true,
		.isGlobal = true, // only kernel pages are mapped here, so always global
		.isAccessed = true,
		.isDirty = true,
		.ppn = physAdr / B_PAGE_SIZE
	};
	newPte.val |= flags;

	pte->val = newPte.val;

	FlushTlbPage(virtAdr);
}


//#pragma mark -

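// Called early during kernel initialization: adopts the page table the boot
// loader activated (read back from SATP), derives the physical-to-virtual
// offset for the physical memory map area, and installs the physical page
// mapper.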
status_t
arch_vm_translation_map_init(kernel_args *args,
	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE("vm_translation_map_init: entry\n");

#ifdef TRACE_VM_TMAP
	TRACE("physical memory ranges:\n");
	for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
		phys_addr_t start = args->physical_memory_range[i].start;
		phys_addr_t end = start + args->physical_memory_range[i].size;
		TRACE("  %" B_PRIxPHYSADDR " - %" B_PRIxPHYSADDR "\n", start, end);
	}

	TRACE("allocated physical ranges:\n");
	for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
		phys_addr_t start = args->physical_allocated_range[i].start;
		phys_addr_t end = start + args->physical_allocated_range[i].size;
		TRACE("  %" B_PRIxPHYSADDR " - %" B_PRIxPHYSADDR "\n", start, end);
	}

	TRACE("allocated virtual ranges:\n");
	for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
		addr_t start = args->virtual_allocated_range[i].start;
		addr_t end = start + args->virtual_allocated_range[i].size;
		TRACE("  %" B_PRIxADDR " - %" B_PRIxADDR "\n", start, end);
	}

	TRACE("kernel args ranges:\n");
	for (uint32 i = 0; i < args->num_kernel_args_ranges; i++) {
		phys_addr_t start = args->kernel_args_range[i].start;
		phys_addr_t end = start + args->kernel_args_range[i].size;
		TRACE("  %" B_PRIxPHYSADDR " - %" B_PRIxPHYSADDR "\n", start, end);
	}
#endif

	// The boot loader has already built and activated the kernel page table;
	// recover its physical address from SATP.
	sPageTable = SatpReg{.val = Satp()}.ppn * B_PAGE_SIZE;

	dprintf("physMapBase: %#" B_PRIxADDR "\n", args->arch_args.physMap.start);
	dprintf("physMemBase: %#" B_PRIxPHYSADDR "\n",
		args->physical_memory_range[0].start);
	gVirtFromPhysOffset = args->arch_args.physMap.start
		- args->physical_memory_range[0].start;

	arch_cpu_disable_user_access();

	*_physicalPageMapper = new(&sPhysicalPageMapperData)
		RISCV64VMPhysicalPageMapper();

	return B_OK;
}


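// Nothing to do here: no semaphore-based bookkeeping is needed for this
// architecture's translation maps.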
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
	return B_OK;
}


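// Nothing to do here either; the mappings created during early boot remain
// valid once areas exist.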
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
	TRACE("vm_translation_map_init_post_area: entry\n");
	return B_OK;
}


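// Map a page on behalf of the boot process, before VMTranslationMap objects
// exist. The B_KERNEL_* protection flags are translated into the equivalent
// PTE permission bits and handed to Map().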
status_t
arch_vm_translation_map_early_map(kernel_args *args,
	addr_t virtAdr, phys_addr_t physAdr, uint8 attributes,
	phys_addr_t (*get_free_page)(kernel_args *))
{
	//dprintf("early_map(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR ")\n", virtAdr, physAdr);
	Pte flags {
		.isRead  = (attributes & B_KERNEL_READ_AREA)    != 0,
		.isWrite = (attributes & B_KERNEL_WRITE_AREA)   != 0,
		.isExec  = (attributes & B_KERNEL_EXECUTE_AREA) != 0,
	};
	Map(virtAdr, physAdr, flags.val, args, get_free_page);
	return B_OK;
}


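// Create the translation map for a new address space. Kernel maps reuse the
// already active kernel page table; user maps are created with no page table
// (0), which RISCV64VMTranslationMap presumably allocates on demand.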
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	*_map = new(std::nothrow) RISCV64VMTranslationMap(kernel,
		kernel ? sPageTable : 0);

	if (*_map == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


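// Coarse accessibility check, apparently intended for kernel-debugger memory
// probing: anything but the NULL page is reported as accessible, regardless
// of the requested protection.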
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	uint32 protection)
{
	return virtualAddress != 0;
}
198