xref: /haiku/src/system/boot/platform/bios_ia32/mmu.cpp (revision 62f5ba006a08b0df30631375878effaf67ae5dbc)
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"
#include "bios.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


/*!	The (physical) memory layout of the boot loader is currently as follows:
	  0x0500 - 0x10000	protected mode stack
	  0x0500 - 0x09000	real mode stack
	 0x10000 - ?		code (up to ~500 kB)
	 0x90000			1st temporary page table (identity maps 0-4 MB)
	 0x91000			2nd (4-8 MB)
	 0x92000 - 0x9e000	further page tables
	 0x9e000 - 0xa0000	SMP trampoline code
	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
	0x100000			page directory
	     ...			boot loader heap (32 kB)
	     ...			free physical memory

	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x81000000, which means that there are currently 16 MB reserved for
	the kernel itself (see kMaxKernelSize).

	The layout in PXE mode differs a bit from this, see definitions below.
*/
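
/*	For illustration (standard 32 bit x86 paging arithmetic, nothing specific
	to this loader): a virtual address is split into a 10 bit page directory
	index, a 10 bit page table index and a 12 bit page offset. For the kernel
	base, for example:

		pgdir index  (bits 31-22):  0x80000000 >> 22          = 512
		table index  (bits 21-12): (0x80000000 >> 12) & 0x3ff = 0
		page offset  (bits 11-0):   0x80000000 & 0xfff        = 0

	So the kernel starts at slot 512 of the page directory, and the 16 MB
	reserved by kMaxKernelSize span the four slots 512-515 (each page table
	maps 4 MB).
*/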

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define TRACE_MEMORY_MAP
	// Define this to print the memory map to serial debug.
	// You also need to define ENABLE_SERIAL in serial.cpp
	// for output to work.


struct gdt_idt_descr {
	uint16 limit;
	uint32 *base;
} _PACKED;

// memory structure returned by int 0x15, ax 0xe820
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};


static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x1000000;		// 16 MB for the kernel
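
/*	For reference, the low flag bits of an x86 page directory/table entry as
	used above (plain 32 bit x86 paging semantics):

		bit 0 (0x01)  present
		bit 1 (0x02)  writable
		bit 2 (0x04)  user accessible

	kDefaultPageTableFlags = 0x07 therefore marks a page table as present,
	writable and user accessible; kDefaultPageFlags, used below for the
	actual page mappings, is defined outside this file.
*/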

// working page directory and page table
static uint32 *sPageDirectory = 0;

#ifdef _PXE_ENV

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif


static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t base;
	if (!get_free_address_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, sNextPhysicalAddress,
			size, &base)) {
		panic("Out of physical memory!");
		return 0;
	}

	insert_physical_allocated_range(base, size);
	sNextPhysicalAddress = base + size;
		// TODO: Can overflow theoretically.

	return base;
}


static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


static uint32 *
get_next_page_table()
{
	TRACE("get_next_page_table, sNextPageTableAddress %#" B_PRIxADDR
		", kPageTableRegionEnd %#" B_PRIxADDR "\n", sNextPageTableAddress,
		kPageTableRegionEnd);

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}


/*!	Adds a new page table for the specified base address */
static uint32*
add_page_table(addr_t base)
{
	base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
		panic("tried to add page table beyond the identity mapped 8 MB "
			"region\n");
		return NULL;
	}

	TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++]
		= (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base / (4 * 1024 * 1024)]
		= (uint32)pageTable | kDefaultPageTableFlags;

	// update the virtual end address in the kernel args
	base += B_PAGE_SIZE * 1024;
	if (base > gKernelArgs.arch_args.virtual_end)
		gKernelArgs.arch_args.virtual_end = base;

	return pageTable;
}
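
/*	A short worked example (illustrative values only): a call like
	add_page_table(0x80001000) rounds the base down to the 4 MB boundary
	0x80000000, stores the new table in page directory slot
	0x80000000 / 0x400000 = 512, and raises
	gKernelArgs.arch_args.virtual_end to at least 0x80400000, the end of the
	4 MB window that this single page table maps.
*/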


static void
unmap_page(addr_t virtualAddress)
{
	TRACE("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress);

	if (virtualAddress < KERNEL_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}


/*!	Creates an entry to map the specified virtualAddress to the given
	physicalAddress.
	If the mapping goes beyond the current page table, it will allocate
	a new one. If it cannot map the requested page, it panics.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress);

	if (virtualAddress < KERNEL_BASE) {
		panic("map_page: asked to map invalid page %p!\n",
			(void *)virtualAddress);
	}

	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);

	if (pageTable == NULL) {
		// we need to add a new page table
		pageTable = add_page_table(virtualAddress);

		if (pageTable == NULL) {
			panic("map_page: failed to allocate a page table for virtual "
				"address %p\n", (void*)virtualAddress);
			return;
		}
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;

	TRACE("map_page: inserting pageTable %p, tableEntry %" B_PRIu32
		", physicalAddress %#" B_PRIxADDR "\n", pageTable, tableEntry,
		physicalAddress);

	pageTable[tableEntry] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));

	TRACE("map_page: done\n");
}
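
/*	Usage sketch (hypothetical values, for illustration only): mapping the
	physical page at 0x000b8000 (the VGA text buffer) to the virtual address
	0x80123000 would look like

		map_page(0x80123000, 0x000b8000, kDefaultPageFlags);

	which writes entry (0x80123000 % 0x400000) / B_PAGE_SIZE = 0x123 of the
	page table covering 0x80000000 - 0x803fffff and then invalidates the TLB
	entry for that virtual page.
*/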


static void
sort_addr_range(addr_range *range, int count)
{
	addr_range tempRange;
	bool done;
	int i;

	do {
		done = true;
		for (i = 1; i < count; i++) {
			if (range[i].start < range[i - 1].start) {
				done = false;
				memcpy(&tempRange, &range[i], sizeof(addr_range));
				memcpy(&range[i], &range[i - 1], sizeof(addr_range));
				memcpy(&range[i - 1], &tempRange, sizeof(addr_range));
			}
		}
	} while (!done);
}
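
/*	This is a plain bubble sort over the range start addresses; for
	illustration, ranges starting at 0x9e000, 0x0 and 0x100000 come out as
	0x0, 0x9e000, 0x100000. The range counts involved are tiny, so the
	quadratic worst case does not matter here.
*/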


#ifdef TRACE_MEMORY_MAP
static const char *
e820_memory_type(uint32 type)
{
	switch (type) {
		case 1: return "memory";
		case 2: return "reserved";
		case 3: return "ACPI reclaim";
		case 4: return "ACPI NVS";
		default: return "unknown/reserved";
	}
}
#endif


static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = {0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE("get_memory_map()\n");

	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if (regs.flags & CARRY_FLAG)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MEMORY_MAP
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%08Lx, len 0x%08Lx, type %lu (%s)\n",
			block[i].base_addr, block[i].length,
			block[i].type, e820_memory_type(block[i].type));
	}
#endif

	return count;
}
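
/*	For reference, the BIOS interface used above is the standard INT 0x15,
	EAX = 0xe820 memory map query: each call passes EDX = 'SMAP', EBX = the
	continuation value returned by the previous call (0 for the first call),
	ECX = the size of the output buffer and ES:DI = a pointer to it, and
	returns one extended_memory descriptor (base, length, type). The BIOS
	sets the carry flag on error and returns EBX = 0 with the last entry,
	which is what terminates the do/while loop above.
*/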


static void
init_page_directory(void)
{
	TRACE("init_page_directory\n");

	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that physical
	// and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Important. Make sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}
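
/*	For reference, the control register writes above have plain x86 semantics:
	CR3 is loaded with the physical address of the page directory, and CR0 is
	set to (1 << 31) | (1 << 16) | (1 << 5) | 1, i.e.

		bit 31  PG  enable paging
		bit 16  WP  write protect: ring 0 faults on read-only pages, too
		bit 5   NE  native x87 FPU error reporting
		bit 0   PE  protected mode enable (already set at this point)

	The WP bit is what the "supervisor threads can fault on read only pages"
	comment refers to.
*/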


//	#pragma mark -


/*!
	Neither \a physicalAddress nor \a size need to be aligned, but the
	function will map all pages the range intersects with.
	If physicalAddress is not page-aligned, the returned virtual address will
	have the same "misalignment".
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}
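
/*	Usage sketch (hypothetical values): mapping a device buffer at physical
	0xe0000480 with size 0x2000 rounds the start down to 0xe0000000, grows
	the size to 0x2480, maps three consecutive virtual pages, and returns the
	first virtual page address plus the original offset 0x480, so the caller
	sees the same misalignment it passed in.
*/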


extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size);

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_BASE || address + size * B_PAGE_SIZE
				>= KERNEL_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(),
			kDefaultPageFlags);
	}

	return address;
}
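
/*	Usage sketch: a call such as mmu_allocate(NULL, 3 * B_PAGE_SIZE) maps the
	next three free virtual pages (starting at sNextVirtualAddress, which
	begins right after the kMaxKernelSize window) to freshly allocated
	physical pages, whereas the kernel loading path passes an explicit
	virtual address inside the KERNEL_BASE .. KERNEL_BASE + kMaxKernelSize
	window.
*/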


/*!	Allocates the given physical range.
	\return \c true, if the range could be allocated, \c false otherwise.
*/
bool
mmu_allocate_physical(addr_t base, size_t size)
{
	// the requested range must still be completely free
	addr_t foundBase;
	if (!get_free_address_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, base, size, &foundBase)
		|| foundBase != base) {
		return false;
	}

	return insert_physical_allocated_range(base, size) == B_OK;
}


/*!	This will unmap the allocated chunk of memory from the virtual
	address space. It might not actually free memory (as its implementation
	is very simple), but it can reuse the freed virtual address space if the
	chunk was the one allocated last.
	Neither \a virtualAddress nor \a size need to be aligned, but the function
	will unmap all pages the range intersects with.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size);

	addr_t address = (addr_t)virtualAddress;
	addr_t pageOffset = address % B_PAGE_SIZE;
	address -= pageOffset;
	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_BASE || address + size > sNextVirtualAddress) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}
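
/*	For illustration (hypothetical values): freeing 0x1800 bytes at virtual
	0x81000800 rounds the start down to 0x81000000 and the size up to 0x2000,
	so exactly two pages are unmapped. If those two pages happen to be the
	top of the allocated virtual range, sNextVirtualAddress is pulled back so
	that the space can be handed out again.
*/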


/*!	Sets up the final and kernel accessible GDT and IDT tables.
	BIOS calls won't work any longer after this function has
	been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE("mmu_init_for_kernel\n");
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE("idt at %p\n", idt);

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// clear it out
		uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			virtualIDT[i] = 0;
		}

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt);
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE("gdt at %p\n", gdt);

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE("gdt at virtual address %p\n",
			(void*)gKernelArgs.arch_args.vir_gdt);
	}

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_addr_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_addr_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_addr_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}
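
/*	For reference, the selector values in the comments above follow from the
	standard x86 encoding: selector = descriptor index * 8 | RPL. The kernel
	code and data segments at indices 1 and 2 give 0x08 and 0x10 (RPL 0), and
	the user segments at indices 3 and 4 give 0x18 | 3 = 0x1b and
	0x20 | 3 = 0x23 (RPL 3).
*/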


extern "C" void
mmu_init(void)
{
	TRACE("mmu_init\n");

	gKernelArgs.arch_args.virtual_end = KERNEL_BASE;

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff;
	// this enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;
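
	/*	With this self-referencing entry in slot 1023, the page table that
		maps a virtual address V becomes visible in the last 4 MB of the
		address space at 0xffc00000 + (V >> 22) * B_PAGE_SIZE, and the page
		directory itself shows up at 0xfffff000 (this is the "page hole"
		handed to the kernel at the end of this function). Purely for
		illustration:

			V = 0x80000000  ->  its page table appears at 0xffe00000
	*/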

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory,
		kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				uint64 base = extMemoryBlock[i].base_addr;
				uint64 end = base + extMemoryBlock[i].length;

				// round everything up to page boundaries, exclusive of pages
				// it partially occupies
				base = ROUNDUP(base, B_PAGE_SIZE);
				end = ROUNDDOWN(end, B_PAGE_SIZE);

				// we ignore all memory beyond 4 GB
				if (end > 0x100000000ULL)
					end = 0x100000000ULL;

				// Also ignore memory below 1 MB. Apparently some BIOSes fail
				// to provide the correct range type for some ranges (cf.
				// #1925). Later in the kernel we will reserve the range
				// 0x0 - 0xa0000 and apparently 0xa0000 - 0x100000 never
				// contain usable memory, so we don't lose anything by doing
				// that.
				if (base < 0x100000)
					base = 0x100000;

				if (end <= base)
					continue;

				if (insert_physical_memory_range(base, end - base) != B_OK) {
					panic("mmu_init(): Failed to add physical memory range "
						"%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
				}
			}
		}

		// sort the ranges
		sort_addr_range(gKernelArgs.physical_memory_range,
			gKernelArgs.num_physical_memory_ranges);

		// On some machines we get several ranges that contain only a few pages
		// (or even only one) each, which causes us to run out of MTRRs later.
		// So we remove all ranges smaller than 64 KB, hoping that this will
		// leave us only with a few larger contiguous ranges (ideally one).
		for (int32 i = gKernelArgs.num_physical_memory_ranges - 1; i >= 0;
				i--) {
			size_t size = gKernelArgs.physical_memory_range[i].size;
			if (size < 64 * 1024) {
				addr_t start = gKernelArgs.physical_memory_range[i].start;
				remove_address_range(gKernelArgs.physical_memory_range,
					&gKernelArgs.num_physical_memory_ranges,
					MAX_PHYSICAL_MEMORY_RANGE, start, size);
			}
		}
	} else {
		// TODO: for now!
		dprintf("No extended memory block - using 64 MB (fix me!)\n");
		uint32 memSize = 64 * 1024 * 1024;

		// We don't have an extended map, assume memory is contiguously mapped
		// at 0x0, but leave out the BIOS range ((640k - 1 page) to 1 MB).
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = 0x9f000;
		gKernelArgs.physical_memory_range[1].start = 0x100000;
		gKernelArgs.physical_memory_range[1].size = memSize;
		gKernelArgs.num_physical_memory_ranges = 2;
	}

	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}