/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <platform/openfirmware/openfirmware.h>
#ifdef __ARM__
#include <arm_mmu.h>
#endif
#include <kernel.h>

#include <board_config.h>

#include <OS.h>

#include <string.h>

/*! This implements boot loader MMU support for Book-E PowerPC,
	which only supports a limited number of TLB entries and no hardware page
	table walk, and does not standardize how the MMU is to be used, requiring
	vendor-specific code.

	Like Linux, we pin one of the TLB entries to a fixed translation,
	however we use it differently.
	cf. http://kernel.org/doc/ols/2003/ols2003-pages-340-350.pdf

	This translation uses a single large page (16 or 256MB are possible) which
	directly maps the beginning of the RAM.
	We use it as a linear space to allocate from at boot time,
	loading the kernel, the modules and other required data into it.
	Near the end we reserve a page table (it doesn't need to be aligned),
	but unlike Linux we use the same globally hashed page table that is
	implemented by Classic PPC, to allow reusing code if possible, and also
	to limit the fragmentation which would occur with a tree-based page table.
	However, this means we might actually run out of page table entries in
	case of too many collisions.

	The kernel will then create areas to cover this already-mapped space.
	This also means proper permission bits (RWX) cannot be applied to the
	separate areas which are enclosed by this mapping.

	We put the kernel stack at the end of the mapping so that the guard page
	is outside of it and thus unmapped. (We don't support SMP.)
*/
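
/*	The hashed page table mentioned above works like the Classic 32-bit PPC
	(OEA) HTAB: every page hashes into one of a fixed number of groups, so a
	mapping can fail once a group is full. The sketch below is only an
	illustration of the usual primary hash, kept out of the build on purpose;
	the names (primary_hash, kHashGroupCount) are made up for the example and
	are not used by this loader.
*/
#if 0
static inline uint32
primary_hash(uint32 virtualSegmentID, addr_t effectiveAddress)
{
	// low 19 bits of the VSID XORed with the 16-bit page index (EA bits 4-19)
	uint32 pageIndex = (effectiveAddress >> 12) & 0xffff;
	return (virtualSegmentID & 0x7ffff) ^ pageIndex;
}

// Example: with kHashGroupCount groups, a translation would go into group
// primary_hash(vsid, ea) % kHashGroupCount; too many pages hashing to the
// same group is the collision case mentioned above.
#endif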

/*!	The (physical) memory layout of the boot loader is currently as follows:
	 0x00000000			kernel
	 0x00400000			...modules

	 (at least on the Sam460ex U-Boot; we'll need to accommodate other setups)
	 0x01000000			boot loader
	 0x01800000			Flattened Device Tree
	 0x01900000			boot.tgz (= ramdisk)
	 0x02000000			boot loader uimage


					boot loader heap (should be discarded later on)
	 ... 256M-Kstack		page hash table
	 ... 256M			kernel stack
					kernel stack guard page

	The kernel is mapped at KERNEL_BASE; all other data mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x80040000, which means that there is currently only 4 MB reserved for
	the kernel itself (see kMaxKernelSize). FIXME: downsize kernel_ppc
*/
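
/*	A rough worked example of the end of that layout. The actual numbers come
	from arch_mmu_setup_pinned_tlb_amcc440() and the kernel headers, so the
	values here are only assumptions for illustration: with a 256 MB pinned
	TLB mapping (tlbSize = 0x10000000), a 4 MB page hash table
	(tableSize = 0x00400000) and a 16 kB kernel stack
	(KERNEL_STACK_SIZE = 0x4000), mmu_init() below would place

		kernel stack  at 0x10000000 - 0x4000              = 0x0fffc000
		page table    at 0x10000000 - 0x4000 - 0x00400000 = 0x0fbfc000

	and the boot loader heap (see platform_init_heap()) right below the page
	table. The guard page then starts at 0x10000000, outside the mapping.
*/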


int32 of_address_cells(int package);
int32 of_size_cells(int package);

extern bool gIs440;
// XXX: use a base class for Book-E support?
extern status_t arch_mmu_setup_pinned_tlb_amcc440(phys_addr_t totalRam,
	size_t &tableSize, size_t &tlbSize);

#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define TRACE_MEMORY_MAP
	// Define this to print the memory map to serial debug.

static const size_t kMaxKernelSize = 0x400000;		// 4 MB for the kernel

static addr_t sNextPhysicalAddress = kMaxKernelSize; // will be set by mmu_init
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
//static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;

// working page directory and page table
static void *sPageTable = 0;


static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	// physical and virtual allocations advance in lockstep: the pinned TLB
	// entry linearly maps the start of RAM, so both stay at the same offset
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;

	return address;
}


#if 0
static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;

	return address;
}
#endif


//	#pragma mark -


extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	panic("WRITEME");
	return 0;
}


/*!	Prepares the final MMU state handed over to the kernel.
	For now this only dumps the memory map for debugging; U-Boot's own TLB
	entries still need to be removed here (see the TODO below).
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

	// TODO: remove all U-Boot TLB

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


// TODO: move this to generic/ ?
static status_t
find_physical_memory_ranges(phys_addr_t &total)
{
	int memory = -1;
	int package;
	dprintf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		package = of_finddevice("/memory");
	else
		package = of_instance_to_package(memory);
	if (package == OF_FAILED)
		return B_ERROR;

	total = 0;

	// Memory base addresses are provided in 32 or 64 bit flavors.
	// #address-cells and #size-cells match the number of 32-bit 'cells'
	// making up the base address and size fields of each "reg" entry
	// (see the illustrative sketch after this function).
	int root = of_finddevice("/");
	int32 regAddressCells = of_address_cells(root);
	int32 regSizeCells = of_size_cells(root);
	if (regAddressCells == OF_FAILED || regSizeCells == OF_FAILED) {
		dprintf("finding base/size length counts failed, assume 32-bit.\n");
		regAddressCells = 1;
		regSizeCells = 1;
	}

	// NOTE: a size-cells value of 2 is possible in theory... but I haven't
	// seen it yet.
	if (regAddressCells > 2 || regSizeCells > 1) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
			"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
			" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

	// On 64-bit PowerPC systems (G5), our mem base range address is larger
	if (regAddressCells == 2) {
		struct of_region<uint64, uint32> regions[64];
		int count = of_getprop(package, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			count = of_getprop(memory, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			return B_ERROR;
		count /= sizeof(regions[0]);

		for (int32 i = 0; i < count; i++) {
			if (regions[i].size == 0) {
				dprintf("%" B_PRId32 ": empty region\n", i);
				continue;
			}
			dprintf("%" B_PRId32 ": base = %" B_PRIu64 ", "
				"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

			total += regions[i].size;

			if (insert_physical_memory_range((addr_t)regions[i].base,
					regions[i].size) != B_OK) {
				dprintf("cannot map physical memory range "
					"(num ranges = %" B_PRIu32 ")!\n",
					gKernelArgs.num_physical_memory_ranges);
				return B_ERROR;
			}
		}
		return B_OK;
	}

	// Otherwise, normal 32-bit PowerPC G3 or G4 have a smaller 32-bit one
	struct of_region<uint32, uint32> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size == 0) {
			dprintf("%" B_PRId32 ": empty region\n", i);
			continue;
		}
		dprintf("%" B_PRId32 ": base = %" B_PRIu32 ", "
			"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}
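

/*	The sketch referenced above: how an OpenFirmware "reg" property decodes
	into of_region entries depending on #address-cells/#size-cells. This is
	kept out of the build and is only an illustration with made-up cell
	values; the real parsing happens via of_getprop() in
	find_physical_memory_ranges().
*/
#if 0
static void
example_reg_decoding(void)
{
	// #address-cells = 2, #size-cells = 1: each entry is three 32-bit cells,
	// {base_hi, base_lo, size}, which of_region<uint64, uint32> mirrors.
	// E.g. cells { 0x00000001, 0x00000000, 0x40000000 } describe 1 GB of RAM
	// starting at physical address 0x1'00000000 (a G5-style > 4 GB address).
	struct of_region<uint64, uint32> region64;
	region64.base = ((uint64)0x00000001 << 32) | 0x00000000;
	region64.size = 0x40000000;

	// #address-cells = 1, #size-cells = 1: two cells per entry, {base, size}.
	// E.g. cells { 0x00000000, 0x20000000 } describe 512 MB starting at 0.
	struct of_region<uint32, uint32> region32;
	region32.base = 0x00000000;
	region32.size = 0x20000000;

	(void)region64;
	(void)region32;
}
#endif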


extern "C" void
mmu_init(void* fdt)
{
	size_t tableSize, tlbSize;
	status_t err;
	TRACE(("mmu_init\n"));

	// get map of physical memory (fill in kernel_args structure)

	phys_addr_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		dprintf("Error: could not find physical memory ranges!\n");
		return /*B_ERROR*/;
	}
	dprintf("total physical memory = %" B_PRId64 "MB\n", total / (1024 * 1024));

	// XXX: ugly, and wrong, there are several 440 mmu types... FIXME
	if (gIs440) {
		err = arch_mmu_setup_pinned_tlb_amcc440(total, tableSize, tlbSize);
		dprintf("setup_pinned_tlb: 0x%08lx table %zdMB tlb %zdMB\n",
			err, tableSize / (1024 * 1024), tlbSize / (1024 * 1024));
	} else {
		panic("Unknown MMU type!");
		return;
	}

	// remember the start of the allocated physical pages
	gKernelArgs.physical_allocated_range[0].start
		= gKernelArgs.physical_memory_range[0].start;
	gKernelArgs.physical_allocated_range[0].size = tlbSize;
	gKernelArgs.num_physical_allocated_ranges = 1;

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= tlbSize + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	sPageTable = (void *)(tlbSize - tableSize - KERNEL_STACK_SIZE);
		// we put the page table near the end of the pinned TLB
	TRACE(("page table at 0x%p to 0x%p\n", sPageTable,
		(uint8 *)sPageTable + tableSize));

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)(tlbSize - KERNEL_STACK_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE(("kernel stack at 0x%" B_PRIx64 " to 0x%" B_PRIx64 "\n",
		gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

#ifdef __ARM__
	init_page_directory();

	// map the page directory on the next vpage
	gKernelArgs.arch_args.vir_pgdir = mmu_map_physical_memory(
		(addr_t)sPageDirectory, MMU_L1_TABLE_SIZE, kDefaultPageFlags);
#endif
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	TRACE(("platform_allocate_region(&%p, %zd)\n", *_address, size));

	//get_next_virtual_address
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
		// round up to page size for clarity

	if (*_address != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you'd better know why you are here :)
		addr_t address = (addr_t)*_address;

		// is the address within the valid range?
		if (address < KERNEL_BASE
			|| address + size >= KERNEL_BASE + kMaxKernelSize) {
			TRACE(("mmu_allocate in illegal range\n address: %" B_PRIx32
				"  KERNEL_BASE: %" B_PRIx32 " KERNEL_BASE + kMaxKernelSize:"
				" %" B_PRIx32 "  address + size : %" B_PRIx32 " \n",
				(uint32)address, (uint32)KERNEL_BASE,
				(uint32)(KERNEL_BASE + kMaxKernelSize),
				(uint32)(address + size)));
			return B_ERROR;
		}
		TRACE(("platform_allocate_region: allocated %zd bytes at %08lx\n", size,
			address));

		return B_OK;
	}

	void *address = (void *)get_next_virtual_address(size);
	if (address == NULL)
		return B_NO_MEMORY;

	TRACE(("platform_allocate_region: allocated %zd bytes at %p\n", size,
		address));
	*_address = address;
	return B_OK;
}
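
/*	A minimal usage sketch of the allocation API above, kept out of the build
	and for illustration only (the caller and sizes are made up): passing NULL
	lets the loader pick the next free virtual address in the pinned mapping,
	while passing a non-NULL address only validates a placement within the
	kernel range as described above.
*/
#if 0
static void
example_allocate(void)
{
	// let the loader choose an address for a 16-page scratch buffer
	void* buffer = NULL;
	if (platform_allocate_region(&buffer, 16 * B_PAGE_SIZE, 0, false) != B_OK)
		panic("out of boot loader memory");

	// place something explicitly at the kernel base (kernel loading path)
	void* kernelImage = (void*)KERNEL_BASE;
	platform_allocate_region(&kernelImage, 2 * 1024 * 1024, 0, false);
}
#endif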


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE(("platform_free_region(%p, %zd)\n", address, size));
#ifdef __ARM__
	mmu_free(address, size);
#endif
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// XXX
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	// the heap is put right before the page table
	void *heap = (uint8 *)sPageTable - args->heap_size;
	// FIXME: use phys addresses to allow passing args to U-Boot?

	*_base = heap;
	*_top = (void *)((uint8 *)heap + args->heap_size);
	TRACE(("boot heap at 0x%p to 0x%p\n", *_base, *_top));
	return B_OK;
}
429