/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <platform/openfirmware/openfirmware.h>
#ifdef __ARM__
#include <arm_mmu.h>
#endif
#include <kernel.h>

#include <board_config.h>

#include <OS.h>

#include <string.h>

/*!	This implements boot loader MMU support for Book-E PowerPC,
	which only supports a limited number of TLB entries and no hardware page
	table walk, and does not standardize how the MMU is to be used, requiring
	vendor-specific code.

	Like Linux, we pin one of the TLB entries to a fixed translation,
	however we use it differently.
	cf. http://kernel.org/doc/ols/2003/ols2003-pages-340-350.pdf

	This translation uses a single large page (16 or 256MB are possible) which
	directly maps the beginning of the RAM.
	We use it as a linear space to allocate from at boot time,
	loading the kernel and modules into it, along with other required data.
	Near the end we reserve a page table (it doesn't need to be aligned),
	but unlike Linux we use the same globally hashed page table that is
	implemented by Classic PPC, to allow reusing code if possible, and also
	to limit the fragmentation that a tree-based page table would cause.
	However, this means we might actually run out of page table entries if
	there are too many collisions.

	The kernel will then create areas to cover this already-mapped space.
	This also means proper permission bits (RWX) will not be applicable to
	separate areas which are enclosed by this mapping.

	We put the kernel stack at the end of the mapping so that the guard page
	is outside of it and thus unmapped. (We don't support SMP.)
*/
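/*	For reference, a minimal sketch of the primary hash used by the Classic
	PPC (OEA) page table format we reuse; the names here are illustrative,
	not actual kernel API:

		// 52-bit virtual address = 24-bit VSID : 16-bit page index : offset
		uint32 hash = (vsid & 0x7ffff) ^ pageIndex;
			// low 19 bits of the VSID XORed with the 16-bit page index
		addr_t pteg = tableBase + ((hash & tableMask) << 6);
			// a PTE group (PTEG) holds 8 PTEs of 8 bytes each

	A PTE lands in this primary PTEG or, failing that, in the secondary one
	(derived from the complemented hash); when both are full we have run out
	of entries, as noted above.
*/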

/*!	The (physical) memory layout of the boot loader is currently as follows:
	0x00000000	kernel
	0x00400000	...modules

	(at least on the Sam460ex U-Boot; we'll need to accommodate other setups)
	0x01000000	boot loader
	0x01800000	Flattened Device Tree
	0x01900000	boot.tgz (= ramdisk)
	0x02000000	boot loader uImage


				boot loader heap (should be discarded later on)
	... 256M-Kstack	page hash table
	... 256M	kernel stack
				kernel stack guard page

	The kernel is mapped at KERNEL_BASE; all other stuff mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x80040000, which means that there is currently only 4 MB reserved for
	the kernel itself (see kMaxKernelSize). FIXME: downsize kernel_ppc
*/
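/*	Within the pinned translation, virtual and physical addresses thus differ
	by a fixed offset (the allocation cursors below advance in lockstep).
	Illustrative helpers, not part of the loader API, assuming the pinned
	entry maps physical 0 at KERNEL_BASE:

		static inline addr_t
		virtual_from_physical(addr_t physical)
		{
			return KERNEL_BASE + physical;
		}

		static inline addr_t
		physical_from_virtual(addr_t virtualAddress)
		{
			return virtualAddress - KERNEL_BASE;
		}
*/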


int32 of_address_cells(int package);
int32 of_size_cells(int package);

extern bool gIs440;
	// XXX: use a base class for Book-E support?
extern status_t arch_mmu_setup_pinned_tlb_amcc440(phys_addr_t totalRam,
	size_t &tableSize, size_t &tlbSize);

#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define TRACE_MEMORY_MAP
	// Define this to print the memory map to serial debug.

static const size_t kMaxKernelSize = 0x400000;	// 4 MB for the kernel

static addr_t sNextPhysicalAddress = kMaxKernelSize;	// will be set by mmu_init
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
//static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;

// working page directory and page table
static void *sPageTable = 0;
static bool sHeapRegionAllocated = false;

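// Advances the physical and virtual allocation cursors in lockstep, so the
// fixed virtual-to-physical offset within the pinned mapping is preserved.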
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;

	return address;
}


#if 0
static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;

	return address;
}
#endif


// #pragma mark -

extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	panic("WRITEME");
	return 0;
}


/*!	Finalizes the MMU state for the kernel. Firmware (U-Boot) calls won't
	work any longer after this function has been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

	// TODO: remove all U-Boot TLB entries

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


// TODO: move this to generic/?
static status_t
find_physical_memory_ranges(phys_addr_t &total)
{
	int memory = -1;
	int package;
	dprintf("checking for memory...\n");
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		package = of_finddevice("/memory");
	else
		package = of_instance_to_package(memory);
	if (package == OF_FAILED)
		return B_ERROR;

	total = 0;

	// Memory base addresses are provided in 32 or 64 bit flavors.
	// #address-cells and #size-cells match the number of 32-bit 'cells'
	// representing the length of the base address and size fields.
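	// Example: with #address-cells = 2 and #size-cells = 1, each "reg"
	// entry is three 32-bit cells (12 bytes), so { 0x1, 0x00000000,
	// 0x40000000 } would describe 1GB of RAM at physical 0x100000000.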
	int root = of_finddevice("/");
	int32 regAddressCells = of_address_cells(root);
	int32 regSizeCells = of_size_cells(root);
	if (regAddressCells == OF_FAILED || regSizeCells == OF_FAILED) {
		dprintf("finding base/size length counts failed, assume 32-bit.\n");
		regAddressCells = 1;
		regSizeCells = 1;
	}

	// NOTE: a size-cells count of 2 is possible in theory... but we haven't
	// seen it yet.
	if (regAddressCells > 2 || regSizeCells > 1) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
			"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
			" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

	// On 64-bit PowerPC systems (G5), our memory base range address is larger
	if (regAddressCells == 2) {
		struct of_region<uint64, uint32> regions[64];
		int count = of_getprop(package, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			count = of_getprop(memory, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			return B_ERROR;
		count /= sizeof(regions[0]);

		for (int32 i = 0; i < count; i++) {
			if (regions[i].size <= 0) {
				dprintf("%" B_PRId32 ": empty region\n", i);
				continue;
			}
			dprintf("%" B_PRId32 ": base = %" B_PRIu64 ", "
				"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

			total += regions[i].size;

			if (insert_physical_memory_range((addr_t)regions[i].base,
					regions[i].size) != B_OK) {
				dprintf("cannot map physical memory range "
					"(num ranges = %" B_PRIu32 ")!\n",
					gKernelArgs.num_physical_memory_ranges);
				return B_ERROR;
			}
		}
		return B_OK;
	}

	// Otherwise, plain 32-bit PowerPC systems (G3, G4) use a smaller
	// 32-bit base address
	struct of_region<uint32, uint32> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size <= 0) {
			dprintf("%" B_PRId32 ": empty region\n", i);
			continue;
		}
		dprintf("%" B_PRId32 ": base = %" B_PRIu32 ", "
			"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}

	return B_OK;
}


extern "C" void
mmu_init(void* fdt)
{
	size_t tableSize, tlbSize;
	status_t err;
	TRACE(("mmu_init\n"));

	// get map of physical memory (fill in kernel_args structure)

	phys_addr_t total;
	if (find_physical_memory_ranges(total) != B_OK) {
		dprintf("Error: could not find physical memory ranges!\n");
		return /*B_ERROR*/;
	}
	dprintf("total physical memory = %" B_PRId64 "MB\n", total / (1024 * 1024));

	// XXX: ugly, and wrong, there are several 440 MMU types... FIXME
	if (gIs440) {
		err = arch_mmu_setup_pinned_tlb_amcc440(total, tableSize, tlbSize);
		dprintf("setup_pinned_tlb: 0x%08" B_PRIx32 " table %zdMB tlb %zdMB\n",
			err, tableSize / (1024 * 1024), tlbSize / (1024 * 1024));
	} else {
		panic("Unknown MMU type!");
		return;
	}

	// remember the start of the allocated physical pages
	gKernelArgs.physical_allocated_range[0].start
		= gKernelArgs.physical_memory_range[0].start;
	gKernelArgs.physical_allocated_range[0].size = tlbSize;
	gKernelArgs.num_physical_allocated_ranges = 1;

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= tlbSize + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	sPageTable = (void *)(tlbSize - tableSize - KERNEL_STACK_SIZE);
		// we put the page table near the end of the pinned TLB entry
	TRACE(("page table at 0x%p to 0x%p\n", sPageTable,
		(uint8 *)sPageTable + tableSize));

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)(tlbSize - KERNEL_STACK_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE(("kernel stack at 0x%" B_PRIx64 " to 0x%" B_PRIx64 "\n",
		gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
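	// Illustrative numbers: with a 256MB pinned entry, a 2MB page table, and
	// a 16kB kernel stack, this puts the page table at 0x0fdfc000-0x0fffc000
	// and the stack at 0x0fffc000-0x10000000, with the guard page falling
	// just past the pinned mapping, i.e. unmapped.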

#ifdef __ARM__
	init_page_directory();

	// map the page directory on the next vpage
	gKernelArgs.arch_args.vir_pgdir = mmu_map_physical_memory(
		(addr_t)sPageDirectory, MMU_L1_TABLE_SIZE, kDefaultPageFlags);
#endif
}


// #pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	TRACE(("platform_allocate_region(&%p, %zd)\n", *_address, size));

	//get_next_virtual_address
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
		// round up to page size for clarity
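		// e.g. with a 4kB B_PAGE_SIZE, a request for 5000 bytes
		// becomes 8192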

	if (*_address != NULL) {
		// This special path is mainly useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you'd better know why you are here :)
		addr_t address = (addr_t)*_address;

		// is the address within the valid range?
		if (address < KERNEL_BASE
			|| address + size >= KERNEL_BASE + kMaxKernelSize) {
			TRACE(("mmu_allocate in illegal range\n address: %" B_PRIx32
				" KERNEL_BASE: %" B_PRIx32 " KERNEL_BASE + kMaxKernelSize:"
				" %" B_PRIx32 " address + size: %" B_PRIx32 "\n",
				(uint32)address, (uint32)KERNEL_BASE,
				(uint32)(KERNEL_BASE + kMaxKernelSize),
				(uint32)(address + size)));
			return B_ERROR;
		}
		TRACE(("platform_allocate_region: allocated %zd bytes at %08lx\n",
			size, address));

		return B_OK;
	}

	void *address = (void *)get_next_virtual_address(size);
	if (address == NULL)
		return B_NO_MEMORY;

	TRACE(("platform_allocate_region: allocated %zd bytes at %p\n", size,
		address));
	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE(("platform_free_region(%p, %zd)\n", address, size));
#ifdef __ARM__
	mmu_free(address, size);
#endif
	return B_OK;
}


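/*!	Hands out the (single) boot loader heap region, carved out of the pinned
	mapping right below the page table.
*/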
ssize_t
platform_allocate_heap_region(size_t size, void **_base)
{
	if (sHeapRegionAllocated)
		return B_NO_MEMORY;
	sHeapRegionAllocated = true;

	// the heap is put right before the page table
	void *heap = (uint8 *)sPageTable - size;
		// FIXME: use phys addresses to allow passing args to U-Boot?

	*_base = heap;
	TRACE(("boot heap at 0x%p\n", *_base));
	return size;
}


void
platform_free_heap_region(void *_base, size_t size)
{
	// XXX
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}