/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>

#include "arch_040_mmu.h"


//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


extern page_root_entry *gPageRoot;


static void
initialize(void)
{
	TRACE(("mmu_040:initialize\n"));
}


static status_t
set_tt(int which, addr_t pa, size_t len, uint32 perms /* NOTUSED */)
{
	TRACE(("mmu_040:set_tt(%d, 0x%lx, 0x%lx, 0x%08lx)\n", which, pa, len, perms));
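	/* 68040 transparent translation register layout (per the MC68040
	 * User's Manual):
	 *   bits 31-24: logical address base
	 *   bits 23-16: logical address mask (a set bit ignores that base bit)
	 *   bit  15:    E (enable)
	 *   bits 14-13: S field (00 user only, 01 supervisor only, 1x both)
	 *   bits  9- 8: U1/U0 (user page attribute signals)
	 *   bits  6- 5: CM (cache mode, 00 = cacheable, write-through)
	 *   bit   2:    W (write protect)
	 * So 0x0a000 below means: enabled, supervisor only, cacheable
	 * write-through, read/write. Example: pa = 0, len = 256MB yields an
	 * address mask of 0x0f, transparently mapping the first 256MB.
	 */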
	uint32 mask;
	uint32 ttr = 0;
	mask = 0x0000ffff;
	if (len) {
		len = (len >> 24) & 0x00ff;
		while (len >>= 1)
			mask <<= 1;
		// enable, supervisor only, upa=0,
		// cacheable write-through, rw
		ttr = 0x0a000;
		ttr |= (pa & 0xff000000);
		ttr |= (mask & 0x00ff0000);
	}
	TRACE(("mmu_040:set_tt: 0x%08lx\n", ttr));

	switch (which) {
		case 0:
			asm volatile(
				"movec %0,%%dtt0\n"
				"movec %0,%%itt0\n"
				: : "d"(ttr));
			break;
		case 1:
			asm volatile(
				"movec %0,%%dtt1\n"
				"movec %0,%%itt1\n"
				: : "d"(ttr));
			break;
		default:
			return EINVAL;
	}
	return B_OK;
}


static status_t
load_rp(addr_t pa)
{
	TRACE(("mmu_040:load_rp(0x%lx)\n", pa));
	// sanity check: the root table must be 512-byte aligned
	if (pa & ((1 << 9) - 1)) {
		panic("mmu root pointer misaligned!");
		return EINVAL;
	}
	// make sure it's empty
	page_directory_entry_scalar *pr = (page_directory_entry_scalar *)pa;
	for (int32 j = 0; j < NUM_ROOTENT_PER_TBL; j++)
		pr[j] = DFL_ROOTENT_VAL;

	/* mc68040 user's manual, 6-37 */
	/* pflush before... why not after ? */
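	/* note: the 68040 keeps separate supervisor (SRP) and user (URP) root
	 * pointers; the boot loader points both at the same tree, so mappings
	 * are identical in either mode */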
	asm volatile(
		"pflusha\n"
		"movec %0,%%srp\n"
		"movec %0,%%urp\n"
		"pflusha\n"
		: : "d"(pa));
	return B_OK;
}


static status_t
allocate_kernel_pgdirs(void)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	addr_t tbl = 0;
	int i;

	// we'll fill in the second half (kernel space) with ready-made page directories
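	// each 040 directory table is much smaller than a page (with 4K pages,
	// 128 4-byte descriptors = 512 bytes), so one allocated page is sliced
	// into NUM_DIRTBL_PER_PAGE consecutive tables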
	for (i = NUM_ROOTENT_PER_TBL/2; i < NUM_ROOTENT_PER_TBL; i++) {
		if (i % NUM_DIRTBL_PER_PAGE) {
			// still room on the current page
			tbl += SIZ_DIRTBL;
		} else {
			tbl = mmu_get_next_page_tables();
			if (!tbl)
				return ENOMEM;
		}
		pr[i].addr = TA_TO_PREA(tbl);
		pr[i].type = DT_ROOT;
		// clear the new directory table
		pd = (page_directory_entry *)tbl;
		for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
			*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
	}
	return B_OK;
}


static status_t
enable_paging(void)
{
	TRACE(("mmu_040:enable_paging\n"));
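	// the 040 TCR only has two interesting bits: bit 15 enables translation,
	// bit 14 selects the page size (0 = 4K, 1 = 8K)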
	uint16 tcr = 0x8000; // enable, 4K page size
	asm volatile(
		"pflusha\n"
		"movec %0,%%tcr\n"
		"pflusha\n"
		: : "d"(tcr));
	return B_OK;
}


static status_t
add_page_table(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t tbl;
	uint32 index;
	uint32 i;

	TRACE(("mmu->add_page_table(base = %p)\n", (void *)virtualAddress));

	// everything is much simpler here because pa == va,
	// thanks to transparent translation

	index = VADDR_TO_PRENT(virtualAddress);
	if (pr[index].type != DT_ROOT)
		panic("invalid page root entry %d\n", index);
#if 0
	// not needed anymore
	if (pr[index].type != DT_ROOT) {
		unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
		//TRACE(("missing page root entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();
		if (!tbl)
			return ENOMEM;
		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			page_root_entry *apr = &pr[aindex + i];
			apr->addr = TA_TO_PREA(tbl);
			apr->type = DT_ROOT;
			//TRACE(("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr));
			// clear the table
			//TRACE(("clearing table[%d]\n", i));
			pd = (page_directory_entry *)tbl;
			for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
				*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
			tbl += SIZ_DIRTBL;
		}
	}
#endif
	pd = (page_directory_entry *)PRE_TO_TA(pr[index]);

	index = VADDR_TO_PDENT(virtualAddress);
	if (pd[index].type != DT_DIR) {
		unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
		//TRACE(("missing page dir entry %d ai %d\n", index, aindex));
		tbl = mmu_get_next_page_tables();
		if (!tbl)
			return ENOMEM;
		// for each page table on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			page_directory_entry *apd = &pd[aindex + i];
			apd->addr = TA_TO_PDEA(tbl);
			apd->type = DT_DIR;
			// clear the table
			//TRACE(("clearing table[%d]\n", i));
			pt = (page_table_entry *)tbl;
			for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
				*(page_table_entry_scalar *)(&pt[j]) = DFL_PAGEENT_VAL;
			tbl += SIZ_PAGETBL;
		}
	}
#if 0
	pt = PDE_TO_TA(pd[index]);

	index = VADDR_TO_PTENT(virtualAddress);
	pt[index].addr = TA_TO_PTEA(0xdeadb00b);
	pt[index].supervisor = 1;
	pt[index].type = DT_PAGE;
#endif
	return B_OK;
}


static page_table_entry *
lookup_pte(addr_t virtualAddress)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	page_table_entry *pt;
	uint32 rindex, dindex, pindex;

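	// with 4K pages, a virtual address decomposes as:
	// 7-bit root index | 7-bit directory index | 6-bit page index | 12-bit offset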
	rindex = VADDR_TO_PRENT(virtualAddress);
	if (pr[rindex].type != DT_ROOT)
		panic("lookup_pte: invalid entry pgrt[%d]", rindex);
	pd = (page_directory_entry *)PRE_TO_TA(pr[rindex]);

	dindex = VADDR_TO_PDENT(virtualAddress);
	if (pd[dindex].type != DT_DIR)
		panic("lookup_pte: invalid entry pgrt[%d] prdir[%d]", rindex, dindex);
	pt = (page_table_entry *)PDE_TO_TA(pd[dindex]);

	pindex = VADDR_TO_PTENT(virtualAddress);
#if 0	// disabled: map_page legitimately looks up entries that are still invalid
	if (pt[pindex].type != DT_PAGE)
		panic("lookup_pte: invalid entry pgrt[%d] prdir[%d] pgtbl[%d]",
			rindex, dindex, pindex);
#endif

	return &pt[pindex];
}


static void
unmap_page(addr_t virtualAddress)
{
	page_table_entry *pt;

	TRACE(("mmu->unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);

	// unmap the page from the correct page table
	pt = lookup_pte(virtualAddress);

	if (pt->type != DT_PAGE)
		panic("unmap_page: asked to unmap non-existing page for %08lx\n",
			virtualAddress);

	pt->addr = TA_TO_PTEA(0xdeadb00b);
	pt->type = DT_INVALID;

	// flush the ATC (the 040's on-chip TLB) for this address
	asm volatile("pflush (%0)" : : "a" (virtualAddress));
}


/** Inserts the physical address into an existing page table. */
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	page_table_entry *pt;

	TRACE(("mmu->map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	pt = lookup_pte(virtualAddress);

	if (pt->type != DT_INVALID)
		panic("map_page: asked to map existing page for %08lx\n",
			virtualAddress);

	TRACE(("map_page: inserting pageTableEntry %p, physicalAddress 0x%lx\n",
		pt, physicalAddress));

	pt->addr = TA_TO_PTEA(physicalAddress);
	pt->supervisor = 1;
#ifdef MMU_HAS_GLOBAL_PAGES
	pt->global = 1;
#endif
	pt->type = DT_PAGE;
	// XXX: are flags needed ? ro ? global ?

	// flush the ATC (the 040's on-chip TLB) for this address
	asm volatile("pflush (%0)" : : "a" (virtualAddress));

	TRACE(("mmu->map_page: done\n"));
}


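// This ops table is presumably dispatched by the generic boot MMU code
// (mmu.cpp), which picks the variant matching the detected CPU model.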
const struct boot_mmu_ops k040MMUOps = {
	&initialize,
	&set_tt,
	&load_rp,
	&allocate_kernel_pgdirs,
	&enable_paging,
	&add_page_table,
	&unmap_page,
	&map_page
};