xref: /haiku/src/system/boot/platform/u-boot/arch/ppc/arch_mmu.cpp (revision 220d04022750f40f8bac8f01fa551211e28d04f2)
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <platform/openfirmware/openfirmware.h>
#ifdef __ARM__
#include <arm_mmu.h>
#endif
#include <kernel.h>

#include <board_config.h>

#include <OS.h>

#include <string.h>

/*! This implements boot loader MMU support for Book-E PowerPC, which only
	supports a limited number of TLB entries, has no hardware page table walk,
	and does not standardize how the MMU is used, requiring vendor-specific
	code.

	Like Linux, we pin one of the TLB entries to a fixed translation, but we
	use it differently.
	cf. http://kernel.org/doc/ols/2003/ols2003-pages-340-350.pdf

	This translation uses a single large page (16 or 256 MB are possible)
	which directly maps the beginning of RAM.
	We use it as a linear space to allocate from at boot time, loading the
	kernel, the modules and other required data into it.
	Near the end we reserve a page table (it doesn't need to be aligned), but
	unlike Linux we use the same globally hashed page table design that
	Classic PPC implements, to allow reusing code where possible, and also to
	limit the fragmentation a tree-based page table would cause.
	However, this means we might actually run out of page table entries if
	there are too many collisions.

	The kernel will then create areas to cover this already-mapped space.
	This also means proper permission bits (RWX) cannot be applied to the
	separate areas that are enclosed by this mapping.

	We put the kernel stack at the end of the mapping so that the guard page
	is outside of it and thus unmapped. (We don't support SMP.)
*/

/*!	The (physical) memory layout of the boot loader is currently as follows:
	 0x00000000			kernel
	 0x00400000			...modules

	 (at least on the Sam460ex U-Boot; we'll need to accommodate other setups)
	 0x01000000			boot loader
	 0x01800000			Flattened Device Tree
	 0x01900000			boot.tgz (= ramdisk)
	 0x02000000			boot loader uimage


					boot loader heap (should be discarded later on)
	 ... 256M-Kstack		page hash table
	 ... 256M			kernel stack
					kernel stack guard page

	The kernel is mapped at KERNEL_BASE; everything else mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x80400000, which means that there is currently only 4 MB reserved for
	the kernel itself (see kMaxKernelSize). FIXME: downsize kernel_ppc
*/


int32 of_address_cells(int package);
int32 of_size_cells(int package);

extern bool gIs440;
// XXX: use a base class for Book-E support?
extern status_t arch_mmu_setup_pinned_tlb_amcc440(phys_addr_t totalRam,
	size_t &tableSize, size_t &tlbSize);

#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
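	// Note that TRACE() wraps a complete dprintf() argument list, so calls
	// need double parentheses, e.g. TRACE(("mmu_init\n")).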

#define TRACE_MEMORY_MAP
	// Define this to print the memory map to serial debug.

static const size_t kMaxKernelSize = 0x400000;		// 4 MB for the kernel

static addr_t sNextPhysicalAddress = kMaxKernelSize; // will be set by mmu_init
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
//static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;

// working page table
static void *sPageTable = 0;


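// Simple bump allocators for boot-time memory. Both counters advance
// together because virtual and physical allocations stay in lockstep inside
// the linearly mapped pinned TLB (see the comment at the top of this file).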
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;

	return address;
}


//	#pragma mark -


extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
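	// Not implemented yet. An implementation would presumably have to pick a
	// free virtual range and enter a translation for each page; in this file
	// it is only used by the __ARM__ path in mmu_init() below.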
	panic("WRITEME");
	return 0;
}


/*!	Prepares the final MMU state for entering the kernel. Firmware (U-Boot)
	services won't work any longer after this function has been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

	// TODO: remove all U-Boot TLB entries

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


// TODO: move this to generic/ ?
static status_t
find_physical_memory_ranges(phys_addr_t &total)
{
	int memory = -1;
	int package;
	dprintf("checking for memory...\n");
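	// The "memory" property of /chosen is an instance handle for the memory
	// node; if it is missing, fall back to looking up the /memory device
	// node directly.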
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		package = of_finddevice("/memory");
	else
		package = of_instance_to_package(memory);
	if (package == OF_FAILED)
		return B_ERROR;

	total = 0;

	// Memory base addresses are provided in 32- or 64-bit flavors.
	// #address-cells and #size-cells give the number of 32-bit 'cells'
	// making up the base address and size fields, respectively.
	int root = of_finddevice("/");
	int32 regAddressCells = of_address_cells(root);
	int32 regSizeCells = of_size_cells(root);
	if (regAddressCells == OF_FAILED || regSizeCells == OF_FAILED) {
		dprintf("finding base/size length counts failed, assuming 32-bit.\n");
		regAddressCells = 1;
		regSizeCells = 1;
	}

	// NOTE: a size-cells count of 2 is possible in theory, but hasn't been
	// seen yet.
	if (regAddressCells > 2 || regSizeCells > 1) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
			"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
			" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

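	// With regAddressCells == 2 and regSizeCells == 1, each "reg" entry is
	// three 32-bit cells: two for the 64-bit base followed by one for the
	// size. For example, 2 GB of RAM at physical address 0 would be encoded
	// as <0x0 0x0 0x80000000>.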
	// On 64-bit PowerPC systems (G5), our memory base range address is larger
	if (regAddressCells == 2) {
		struct of_region<uint64> regions[64];
		int count = of_getprop(package, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			count = of_getprop(memory, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			return B_ERROR;
		count /= sizeof(regions[0]);

		for (int32 i = 0; i < count; i++) {
			if (regions[i].size <= 0) {
				dprintf("%" B_PRId32 ": empty region\n", i);
				continue;
			}
			dprintf("%" B_PRId32 ": base = %" B_PRIu64 ", "
				"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

			total += regions[i].size;

			if (insert_physical_memory_range((addr_t)regions[i].base,
					regions[i].size) != B_OK) {
				dprintf("cannot map physical memory range "
					"(num ranges = %" B_PRIu32 ")!\n",
					gKernelArgs.num_physical_memory_ranges);
				return B_ERROR;
			}
		}
		return B_OK;
	}

	// Otherwise, normal 32-bit PowerPC systems (G3 or G4) have a smaller
	// 32-bit base address
	struct of_region<uint32> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			dprintf("%" B_PRId32 ": empty region\n", i);
			continue;
		}
		dprintf("%" B_PRId32 ": base = %" B_PRIu32 ", "
			"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


extern "C" void
mmu_init(void)
{
	size_t tableSize, tlbSize;
	status_t err;
	TRACE(("mmu_init\n"));

	// get map of physical memory (fill in kernel_args structure)

	phys_addr_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		dprintf("Error: could not find physical memory ranges!\n");
		return /*B_ERROR*/;
	}
	dprintf("total physical memory = %" B_PRIuPHYSADDR " MB\n",
		total / (1024 * 1024));

	// XXX: ugly, and wrong, there are several 440 mmu types... FIXME
	if (gIs440) {
		err = arch_mmu_setup_pinned_tlb_amcc440(total, tableSize, tlbSize);
		dprintf("setup_pinned_tlb: 0x%08" B_PRIx32 " table %zd MB tlb %zd MB\n",
			err, tableSize / (1024 * 1024), tlbSize / (1024 * 1024));
	} else {
		panic("Unknown MMU type!");
		return;
	}

	// remember the start of the allocated physical pages
	gKernelArgs.physical_allocated_range[0].start
		= gKernelArgs.physical_memory_range[0].start;
	gKernelArgs.physical_allocated_range[0].size = tlbSize;
	gKernelArgs.num_physical_allocated_ranges = 1;

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= tlbSize + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	gKernelArgs.num_virtual_allocated_ranges = 1;
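	// The kernel stack guard pages land past the end of the pinned mapping
	// and thus stay unmapped (cf. the layout comment at the top of this file).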


	sPageTable = (void *)(tlbSize - tableSize - KERNEL_STACK_SIZE);
		// we put the page table near the end of the pinned TLB
	TRACE(("page table at 0x%p to 0x%p\n", sPageTable,
		(uint8 *)sPageTable + tableSize));

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)(tlbSize - KERNEL_STACK_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE(("kernel stack at 0x%" B_PRIx64 " to 0x%" B_PRIx64 "\n",
		gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

#ifdef __ARM__
	init_page_directory();

	// map the page directory on the next vpage
	gKernelArgs.arch_args.vir_pgdir = mmu_map_physical_memory(
		(addr_t)sPageDirectory, MMU_L1_TABLE_SIZE, kDefaultPageFlags);
#endif
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	TRACE(("platform_allocate_region(&%p, %zd)\n", *_address, size));

	//get_next_virtual_address
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
		// round up to page size for clarity
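		// e.g. with a 4 KiB B_PAGE_SIZE a request for 5000 bytes becomes 8192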

	if (*_address != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)*_address;

		// is the address within the valid range?
		if (address < KERNEL_BASE
			|| address + size >= KERNEL_BASE + kMaxKernelSize) {
			TRACE(("mmu_allocate in illegal range\n address: %lx"
				"  KERNEL_BASE: %lx KERNEL_BASE + kMaxKernelSize: %lx"
				"  address + size : %lx \n", (uint32)address, KERNEL_BASE,
				KERNEL_BASE + kMaxKernelSize, (uint32)(address + size)));
			return B_ERROR;
		}
		TRACE(("platform_allocate_region: allocated %zd bytes at %08lx\n", size,
			address));

		return B_OK;
	}

	void *address = (void *)get_next_virtual_address(size);
	if (address == NULL)
		return B_NO_MEMORY;

	TRACE(("platform_allocate_region: allocated %zd bytes at %p\n", size,
		address));
	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE(("platform_free_region(%p, %zd)\n", address, size));
#ifdef __ARM__
	mmu_free(address, size);
#endif
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// XXX
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	// the heap is put right before the page table
	void *heap = (uint8 *)sPageTable - args->heap_size;
	// FIXME: use phys addresses to allow passing args to U-Boot?
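	// Together with the page hash table and the kernel stack above it, the
	// boot heap thus fills out the top of the pinned TLB mapping (see the
	// layout comment at the top of this file).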

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	TRACE(("boot heap at 0x%p to 0x%p\n", *_base, *_top));
	return B_OK;
}