xref: /haiku/src/system/boot/platform/bios_ia32/mmu.cpp (revision 5c6260dc232fcb2d4d5d1103c1623dba9663b753)
1 /*
2  * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Based on code written by Travis Geiselbrecht for NewOS.
4  *
5  * Distributed under the terms of the MIT License.
6  */
7 
8 
9 #include "mmu.h"
10 
11 #include <string.h>
12 
13 #include <OS.h>
14 
15 #include <arch/cpu.h>
16 #include <arch_kernel.h>
17 #include <boot/platform.h>
18 #include <boot/stdio.h>
19 #include <boot/kernel_args.h>
20 #include <boot/stage2.h>
21 #include <kernel.h>
22 
23 #include "bios.h"
24 #include "interrupts.h"
25 
26 
27 /*!	The (physical) memory layout of the boot loader is currently as follows:
28 	  0x0500 - 0x10000	protected mode stack
29 	  0x0500 - 0x09000	real mode stack
30 	 0x10000 - ?		code (up to ~500 kB)
31 	 0x90000			1st temporary page table (identity maps 0-4 MB)
32 	 0x91000			2nd (4-8 MB)
33 	 0x92000 - 0x9e000	further page tables
34 	 0x9e000 - 0xa0000	SMP trampoline code
35 	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
36 	0x100000			page directory
37 	     ...			boot loader heap (32 kB)
38 	     ...			free physical memory
39 
40 	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
41 	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
42 	loader (kernel args, modules, driver settings, ...) comes after
43 	0x81000000 which means that there is currently 16 MB reserved for
44 	the kernel itself (see kMaxKernelSize).
45 
46 	The layout in PXE mode differs a bit from this, see definitions below.
47 */
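// The constants below implement this layout: boot loader page tables are
// carved out of the low memory window between sNextPageTableAddress and
// kPageTableRegionEnd, and since the first 8 MB stay identity mapped, the
// loader can keep treating physical addresses in that range as pointers.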
48 
49 //#define TRACE_MMU
50 #ifdef TRACE_MMU
51 #	define TRACE(x...) dprintf(x)
52 #else
53 #	define TRACE(x...) ;
54 #endif
55 
56 
57 //#define TRACE_MEMORY_MAP
58 	// Define this to print the memory map to serial debug.
59 	// You also need to define ENABLE_SERIAL in serial.cpp
60 	// for output to work.
61 
62 
63 // memory structure returned by int 0x15, ax 0xe820
64 struct extended_memory {
65 	uint64 base_addr;
66 	uint64 length;
67 	uint32 type;
68 };
69 
70 
71 static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
72 static const size_t kMaxKernelSize = 0x1000000;		// 16 MB for the kernel
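// Consequently the kernel image itself may occupy 0x80000000 - 0x81000000
// (KERNEL_BASE + kMaxKernelSize); everything else the loader maps is placed
// above that, starting at sNextVirtualAddress.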
73 
74 // working page directory and page table
75 static uint32 *sPageDirectory = 0;
76 
77 #ifdef _PXE_ENV
78 
79 static addr_t sNextPhysicalAddress = 0x112000;
80 static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
81 
82 static addr_t sNextPageTableAddress = 0x7d000;
83 static const uint32 kPageTableRegionEnd = 0x8b000;
84 	// we need to reserve 2 pages for the SMP trampoline code
85 
86 #else
87 
88 static addr_t sNextPhysicalAddress = 0x100000;
89 static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
90 
91 static addr_t sNextPageTableAddress = 0x90000;
92 static const uint32 kPageTableRegionEnd = 0x9e000;
93 	// we need to reserve 2 pages for the SMP trampoline code
94 
95 #endif
96 
97 
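/*!	Simple bump allocator for virtual address space above the kernel range;
	the space is only handed back when mmu_free() is called on the most
	recently allocated range.
*/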
98 static addr_t
99 get_next_virtual_address(size_t size)
100 {
101 	addr_t address = sNextVirtualAddress;
102 	sNextVirtualAddress += size;
103 
104 	return address;
105 }
106 
107 
108 static addr_t
109 get_next_physical_address(size_t size)
110 {
111 	phys_addr_t base;
112 	if (!get_free_physical_address_range(gKernelArgs.physical_allocated_range,
113 			gKernelArgs.num_physical_allocated_ranges, sNextPhysicalAddress,
114 			size, &base)) {
115 		panic("Out of physical memory!");
116 		return 0;
117 	}
118 
119 	insert_physical_allocated_range(base, size);
120 	sNextPhysicalAddress = base + size;
121 		// TODO: Can overflow theoretically.
122 
123 	return base;
124 }
125 
126 
127 static addr_t
128 get_next_virtual_page()
129 {
130 	return get_next_virtual_address(B_PAGE_SIZE);
131 }
132 
133 
134 static addr_t
135 get_next_physical_page()
136 {
137 	return get_next_physical_address(B_PAGE_SIZE);
138 }
139 
140 
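/*!	Returns the next physical page to be used as a page table. Page tables
	are taken from the dedicated region below kPageTableRegionEnd first;
	once that region is used up, generic physical pages are returned, which
	only works as long as they still fall into the identity mapped first
	8 MB (otherwise add_page_table() will panic).
*/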
141 static uint32 *
142 get_next_page_table()
143 {
144 	TRACE("get_next_page_table, sNextPageTableAddress %#" B_PRIxADDR
145 		", kPageTableRegionEnd %#" B_PRIxADDR "\n", sNextPageTableAddress,
146 		kPageTableRegionEnd);
147 
148 	addr_t address = sNextPageTableAddress;
149 	if (address >= kPageTableRegionEnd)
150 		return (uint32 *)get_next_physical_page();
151 
152 	sNextPageTableAddress += B_PAGE_SIZE;
153 	return (uint32 *)address;
154 }
155 
156 
157 /*!	Adds a new page table for the specified base address */
158 static uint32*
159 add_page_table(addr_t base)
160 {
161 	base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);
162 
163 	// Get new page table and clear it out
164 	uint32 *pageTable = get_next_page_table();
165 	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
166 		panic("tried to add page table beyond the identity mapped 8 MB "
167 			"region\n");
168 		return NULL;
169 	}
170 
171 	TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);
172 
173 	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++]
174 		= (uint32)pageTable;
175 
176 	for (int32 i = 0; i < 1024; i++)
177 		pageTable[i] = 0;
178 
179 	// put the new page table into the page directory
180 	sPageDirectory[base / (4 * 1024 * 1024)]
181 		= (uint32)pageTable | kDefaultPageTableFlags;
182 
183 	// update the virtual end address in the kernel args
184 	base += B_PAGE_SIZE * 1024;
185 	if (base > gKernelArgs.arch_args.virtual_end)
186 		gKernelArgs.arch_args.virtual_end = base;
187 
188 	return pageTable;
189 }
190 
191 
192 static void
193 unmap_page(addr_t virtualAddress)
194 {
195 	TRACE("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress);
196 
197 	if (virtualAddress < KERNEL_BASE) {
198 		panic("unmap_page: asked to unmap invalid page %p!\n",
199 			(void *)virtualAddress);
200 	}
201 
202 	// unmap the page from the correct page table
203 	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
204 		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
205 	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;
206 
207 	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
208 }
209 
210 
211 /*!	Creates an entry to map the specified virtualAddress to the given
212 	physicalAddress.
213 	If no page table exists for the address yet, it will allocate
214 	a new one. If it cannot map the requested page, it panics.
215 */
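// With 4 KB pages and no PAE, a 32 bit virtual address splits into a 10 bit
// page directory index (virtualAddress / (B_PAGE_SIZE * 1024)), a 10 bit
// page table index ((virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE),
// and a 12 bit offset; KERNEL_BASE 0x80000000, for example, lands in
// directory slot 512, table entry 0.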
216 static void
217 map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
218 {
219 	TRACE("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
220 		physicalAddress);
221 
222 	if (virtualAddress < KERNEL_BASE) {
223 		panic("map_page: asked to map invalid page %p!\n",
224 			(void *)virtualAddress);
225 	}
226 
227 	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
228 		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
229 
230 	if (pageTable == NULL) {
231 		// we need to add a new page table
232 		pageTable = add_page_table(virtualAddress);
233 
234 		if (pageTable == NULL) {
235 			panic("map_page: failed to allocate a page table for virtual "
236 				"address %p\n", (void*)virtualAddress);
237 			return;
238 		}
239 	}
240 
241 	physicalAddress &= ~(B_PAGE_SIZE - 1);
242 
243 	// map the page to the correct page table
244 	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;
245 
246 	TRACE("map_page: inserting pageTable %p, tableEntry %" B_PRIu32
247 		", physicalAddress %#" B_PRIxADDR "\n", pageTable, tableEntry,
248 		physicalAddress);
249 
250 	pageTable[tableEntry] = physicalAddress | flags;
251 
252 	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
253 
254 	TRACE("map_page: done\n");
255 }
256 
257 
258 #ifdef TRACE_MEMORY_MAP
259 static const char *
260 e820_memory_type(uint32 type)
261 {
262 	switch (type) {
263 		case 1: return "memory";
264 		case 2: return "reserved";
265 		case 3: return "ACPI reclaim";
266 		case 4: return "ACPI NVS";
267 		default: return "unknown/reserved";
268 	}
269 }
270 #endif
271 
272 
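/*!	Queries the BIOS E820 memory map: each int 0x15, EAX 0xE820 call fills
	one extended_memory descriptor at ES:DI and returns a continuation value
	in EBX; the loop is done when EBX comes back as zero, while a set carry
	flag indicates an error or an unsupported function.
*/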
273 static uint32
274 get_memory_map(extended_memory **_extendedMemory)
275 {
276 	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
277 	bios_regs regs = {0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
278 	uint32 count = 0;
279 
280 	TRACE("get_memory_map()\n");
281 
282 	do {
283 		regs.eax = 0xe820;
284 		regs.edx = 'SMAP';
285 
286 		call_bios(0x15, &regs);
287 		if ((regs.flags & CARRY_FLAG) != 0)
288 			return 0;
289 
290 		regs.edi += sizeof(extended_memory);
291 		count++;
292 	} while (regs.ebx != 0);
293 
294 	*_extendedMemory = block;
295 
296 #ifdef TRACE_MEMORY_MAP
297 	dprintf("extended memory info (from 0xe820):\n");
298 	for (uint32 i = 0; i < count; i++) {
299 		dprintf("    base 0x%08Lx, len 0x%08Lx, type %lu (%s)\n",
300 			block[i].base_addr, block[i].length,
301 			block[i].type, e820_memory_type(block[i].type));
302 	}
303 #endif
304 
305 	return count;
306 }
307 
308 
309 static void
310 init_page_directory(void)
311 {
312 	TRACE("init_page_directory\n");
313 
314 	// allocate a new pgdir
315 	sPageDirectory = (uint32 *)get_next_physical_page();
316 	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
317 
318 	// clear out the pgdir
319 	for (int32 i = 0; i < 1024; i++) {
320 		sPageDirectory[i] = 0;
321 	}
322 
323 	// Identity map the first 8 MB of memory so that its
324 	// physical and virtual addresses are the same.
325 	// These page tables won't be taken over into the kernel.
326 
327 	// make the first page table at the first free spot
328 	uint32 *pageTable = get_next_page_table();
329 
330 	for (int32 i = 0; i < 1024; i++) {
331 		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
332 	}
333 
334 	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;
335 
336 	// make the second page table
337 	pageTable = get_next_page_table();
338 
339 	for (int32 i = 0; i < 1024; i++) {
340 		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
341 	}
342 
343 	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;
344 
345 	gKernelArgs.arch_args.num_pgtables = 0;
346 
347 	// switch to the new pgdir and enable paging
348 	asm("movl %0, %%eax;"
349 		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
350 	// Important.  Make sure supervisor threads can fault on read only pages...
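	// CR0 bits set below: PE (bit 0) keeps protected mode enabled, NE
	// (bit 5) selects native FPU error reporting, WP (bit 16) makes ring 0
	// fault on read-only pages, and PG (bit 31) finally turns paging on.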
351 	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
352 }
353 
354 
355 //	#pragma mark -
356 
357 
358 /*!
359 	Neither \a physicalAddress nor \a size need to be aligned, but the function
360 	will map all pages the range intersects with.
361 	If physicalAddress is not page-aligned, the returned virtual address will
362 	have the same "misalignment".
363 */
364 extern "C" addr_t
365 mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
366 {
367 	addr_t address = sNextVirtualAddress;
368 	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);
369 
370 	physicalAddress -= pageOffset;
371 	size += pageOffset;
372 
373 	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
374 		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
375 	}
376 
377 	return address + pageOffset;
378 }
379 
380 
381 extern "C" void *
382 mmu_allocate(void *virtualAddress, size_t size)
383 {
384 	TRACE("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
385 		"%ld\n", virtualAddress, sNextVirtualAddress, size);
386 
387 	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
388 		// get number of pages to map
389 
390 	if (virtualAddress != NULL) {
391 		// This special path is almost only useful for loading the
392 		// kernel into memory; it will only allow you to map the
393 		// 'kMaxKernelSize' bytes following the kernel base address.
394 		// Also, it won't check for already mapped addresses, so
395 		// you better know why you are here :)
396 		addr_t address = (addr_t)virtualAddress;
397 
398 		// is the address within the valid range?
399 		if (address < KERNEL_BASE
400 			|| address + size * B_PAGE_SIZE >= KERNEL_BASE + kMaxKernelSize)
401 			return NULL;
402 
403 		for (uint32 i = 0; i < size; i++) {
404 			map_page(address, get_next_physical_page(), kDefaultPageFlags);
405 			address += B_PAGE_SIZE;
406 		}
407 
408 		return virtualAddress;
409 	}
410 
411 	void *address = (void *)sNextVirtualAddress;
412 
413 	for (uint32 i = 0; i < size; i++) {
414 		map_page(get_next_virtual_page(), get_next_physical_page(),
415 			kDefaultPageFlags);
416 	}
417 
418 	return address;
419 }
420 
421 
422 /*!	Allocates the given physical range.
423 	\return \c true, if the range could be allocated, \c false otherwise.
424 */
425 bool
426 mmu_allocate_physical(addr_t base, size_t size)
427 {
428 	// check whether the physical memory range exists at all
429 	if (!is_physical_address_range_covered(gKernelArgs.physical_memory_range,
430 			gKernelArgs.num_physical_memory_ranges, base, size)) {
431 		return false;
432 	}
433 
434 	// check whether the physical range is still free
435 	phys_addr_t foundBase;
436 	if (!get_free_physical_address_range(gKernelArgs.physical_allocated_range,
437 			gKernelArgs.num_physical_allocated_ranges, base, size, &foundBase)
438 		|| foundBase != base) {
439 		return false;
440 	}
441 
442 	return insert_physical_allocated_range(base, size) == B_OK;
443 }
444 
445 
446 /*!	This will unmap the allocated chunk of memory from the virtual
447 	address space. As its implementation is very simple, it might not
448 	actually free the memory, but it can reuse the virtual address range.
449 	Neither \a virtualAddress nor \a size need to be aligned, but the function
450 	will unmap all pages the range intersects with.
451 */
452 extern "C" void
453 mmu_free(void *virtualAddress, size_t size)
454 {
455 	TRACE("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size);
456 
457 	addr_t address = (addr_t)virtualAddress;
458 	addr_t pageOffset = address % B_PAGE_SIZE;
459 	address -= pageOffset;
460 	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
461 
462 	// is the address within the valid range?
463 	if (address < KERNEL_BASE || address + size > sNextVirtualAddress) {
464 		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
465 			(void *)address, size);
466 	}
467 
468 	// unmap all pages within the range
469 	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
470 		unmap_page(address);
471 		address += B_PAGE_SIZE;
472 	}
473 
474 	if (address == sNextVirtualAddress) {
475 		// we can actually reuse the virtual address space
476 		sNextVirtualAddress -= size;
477 	}
478 }
479 
480 
481 /*!	Sets up the final and kernel accessible GDT and IDT tables.
482 	BIOS calls won't work any longer after this function has
483 	been called.
484 */
485 extern "C" void
486 mmu_init_for_kernel(void)
487 {
488 	TRACE("mmu_init_for_kernel\n");
489 	// set up a new idt
490 	{
491 		uint32 *idt;
492 
493 		// find a new idt
494 		idt = (uint32 *)get_next_physical_page();
495 		gKernelArgs.arch_args.phys_idt = (uint32)idt;
496 
497 		TRACE("idt at %p\n", idt);
498 
499 		// map the idt into virtual space
500 		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
501 		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);
502 
503 		// initialize it
504 		interrupts_init_kernel_idt((void*)gKernelArgs.arch_args.vir_idt,
505 			IDT_LIMIT);
506 
507 		TRACE("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt);
508 	}
509 
510 	// set up a new gdt
511 	{
512 		struct gdt_idt_descr gdtDescriptor;
513 		segment_descriptor *gdt;
514 
515 		// find a new gdt
516 		gdt = (segment_descriptor *)get_next_physical_page();
517 		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;
518 
519 		TRACE("gdt at %p\n", gdt);
520 
521 		// map the gdt into virtual space
522 		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
523 		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);
524 
525 		// put standard segment descriptors in it
526 		segment_descriptor* virtualGDT
527 			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
528 		clear_segment_descriptor(&virtualGDT[0]);
529 
530 		// seg 0x08 - kernel 4GB code
531 		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
532 			DPL_KERNEL);
533 
534 		// seg 0x10 - kernel 4GB data
535 		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
536 			DPL_KERNEL);
537 
538 		// seg 0x1b - ring 3 user 4GB code
539 		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
540 			DPL_USER);
541 
542 		// seg 0x23 - ring 3 user 4GB data
543 		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
544 			DPL_USER);
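		// (A selector encodes (descriptor index << 3) | RPL, which is where
		// the 0x08/0x10 kernel and 0x1b/0x23 user values above come from.)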
545 
546 		// virtualGDT[5] and above will be filled later by the kernel
547 		// to contain the TSS descriptors, and for TLS (one for every CPU)
548 
549 		// load the GDT
550 		gdtDescriptor.limit = GDT_LIMIT - 1;
551 		gdtDescriptor.base = (void*)gKernelArgs.arch_args.vir_gdt;
552 
553 		asm("lgdt	%0;"
554 			: : "m" (gdtDescriptor));
555 
556 		TRACE("gdt at virtual address %p\n",
557 			(void*)gKernelArgs.arch_args.vir_gdt);
558 	}
559 
560 	// Save the memory we've virtually allocated (for the kernel and other
561 	// stuff)
562 	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
563 	gKernelArgs.virtual_allocated_range[0].size
564 		= sNextVirtualAddress - KERNEL_BASE;
565 	gKernelArgs.num_virtual_allocated_ranges = 1;
566 
567 	// sort the address ranges
568 	sort_physical_address_ranges(gKernelArgs.physical_memory_range,
569 		gKernelArgs.num_physical_memory_ranges);
570 	sort_physical_address_ranges(gKernelArgs.physical_allocated_range,
571 		gKernelArgs.num_physical_allocated_ranges);
572 	sort_address_ranges(gKernelArgs.virtual_allocated_range,
573 		gKernelArgs.num_virtual_allocated_ranges);
574 
575 #ifdef TRACE_MEMORY_MAP
576 	{
577 		uint32 i;
578 
579 		dprintf("phys memory ranges:\n");
580 		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
581 			dprintf("    base %#018" B_PRIxPHYSADDR ", length %#018"
582 				B_PRIxPHYSADDR "\n", gKernelArgs.physical_memory_range[i].start,
583 				gKernelArgs.physical_memory_range[i].size);
584 		}
585 
586 		dprintf("allocated phys memory ranges:\n");
587 		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
588 			dprintf("    base %#018" B_PRIxPHYSADDR ", length %#018"
589 				B_PRIxPHYSADDR "\n",
590 				gKernelArgs.physical_allocated_range[i].start,
591 				gKernelArgs.physical_allocated_range[i].size);
592 		}
593 
594 		dprintf("allocated virt memory ranges:\n");
595 		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
596 			dprintf("    base %#018" B_PRIxADDR ", length %#018" B_PRIxSIZE
597 			"\n", gKernelArgs.virtual_allocated_range[i].start,
598 			gKernelArgs.virtual_allocated_range[i].size);
599 		}
600 	}
601 #endif
602 }
603 
604 
605 extern "C" void
606 mmu_init(void)
607 {
608 	TRACE("mmu_init\n");
609 
610 	gKernelArgs.arch_args.virtual_end = KERNEL_BASE;
611 
612 	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
613 	gKernelArgs.physical_allocated_range[0].size = 0;
614 	gKernelArgs.num_physical_allocated_ranges = 1;
615 		// remember the start of the allocated physical pages
616 
617 	init_page_directory();
618 
619 	// Map the page directory into kernel space at 0xffc00000-0xffffffff;
620 	// this enables an MMU trick where the 4 MB region that this pgdir entry
621 	// represents now maps the 4 MB of potential page tables that the pgdir
622 	// points to. Thrown away later in VM bringup, but useful for now.
623 	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;
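	// With this recursive entry in place, page table i is visible at
	// 0xffc00000 + i * B_PAGE_SIZE, and the page directory itself at
	// 0xfffff000.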
624 
625 	// also map it on the next vpage
626 	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
627 	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory,
628 		kDefaultPageFlags);
629 
630 	// map in a kernel stack
631 	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
632 		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
633 	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
634 		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
635 
636 	TRACE("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
637 		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);
638 
639 	extended_memory *extMemoryBlock;
640 	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);
641 
642 	// figure out the memory map
643 	if (extMemoryCount > 0) {
644 		gKernelArgs.num_physical_memory_ranges = 0;
645 
646 		for (uint32 i = 0; i < extMemoryCount; i++) {
647 			// Type 1 is available memory
648 			if (extMemoryBlock[i].type == 1) {
649 				uint64 base = extMemoryBlock[i].base_addr;
650 				uint64 length = extMemoryBlock[i].length;
651 				uint64 end = base + length;
652 
653 				// round everything up to page boundaries, exclusive of pages
654 				// it partially occupies
655 				base = ROUNDUP(base, B_PAGE_SIZE);
656 				end = ROUNDDOWN(end, B_PAGE_SIZE);
657 
658 				// We ignore all memory beyond 4 GB, if phys_addr_t is only
659 				// 32 bit wide.
660 				#if B_HAIKU_PHYSICAL_BITS == 32
661 					if (end > 0x100000000ULL)
662 						end = 0x100000000ULL;
663 				#endif
664 
665 				// Also ignore memory below 1 MB. Apparently some BIOSes fail to
666 				// provide the correct range type for some ranges (cf. #1925).
667 				// Later in the kernel we will reserve the range 0x0 - 0xa0000
668 				// and apparently 0xa0000 - 0x100000 never contain usable
669 				// memory, so we don't lose anything by doing that.
670 				if (base < 0x100000)
671 					base = 0x100000;
672 
673 				gKernelArgs.ignored_physical_memory
674 					+= length - (max_c(end, base) - base);
675 
676 				if (end <= base)
677 					continue;
678 
679 				if (insert_physical_memory_range(base, end - base) != B_OK) {
680 					panic("mmu_init(): Failed to add physical memory range "
681 						"%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
682 				}
683 			} else if (extMemoryBlock[i].type == 3) {
684 				// ACPI reclaim -- physical memory we could actually use later
685 				gKernelArgs.ignored_physical_memory += extMemoryBlock[i].length;
686 			}
687 		}
688 
689 		// sort the ranges
690 		sort_physical_address_ranges(gKernelArgs.physical_memory_range,
691 			gKernelArgs.num_physical_memory_ranges);
692 
693 		// On some machines we get several ranges that contain only a few pages
694 		// (or even only one) each, which causes us to run out of MTRRs later.
695 		// So we remove all ranges smaller than 64 KB, hoping that this will
696 		// leave us only with a few larger contiguous ranges (ideally one).
697 		for (int32 i = gKernelArgs.num_physical_memory_ranges - 1; i >= 0;
698 				i--) {
699 			size_t size = gKernelArgs.physical_memory_range[i].size;
700 			if (size < 64 * 1024) {
701 				addr_t start = gKernelArgs.physical_memory_range[i].start;
702 				remove_physical_address_range(gKernelArgs.physical_memory_range,
703 					&gKernelArgs.num_physical_memory_ranges,
704 					MAX_PHYSICAL_MEMORY_RANGE, start, size);
705 			}
706 		}
707 	} else {
708 		bios_regs regs;
709 
710 		// We don't have an extended map; assume memory is contiguously mapped
711 		// at 0x0, but leave out the BIOS range ((640k - 1 page) to 1 MB).
712 		gKernelArgs.physical_memory_range[0].start = 0;
713 		gKernelArgs.physical_memory_range[0].size = 0x9f000;
714 		gKernelArgs.physical_memory_range[1].start = 0x100000;
715 
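		// Int 0x15, AX 0xE801 reports the memory between 1 and 16 MB in KB
		// (CX) and the memory above 16 MB in 64 KB blocks (DX); the older
		// AH 0x88 fallback only returns the number of KB above 1 MB in AX.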
716 		regs.eax = 0xe801; // AX
717 		call_bios(0x15, &regs);
718 		if ((regs.flags & CARRY_FLAG) != 0) {
719 			regs.eax = 0x8800; // AH 88h
720 			call_bios(0x15, &regs);
721 			if ((regs.flags & CARRY_FLAG) != 0) {
722 				// TODO: for now!
723 				dprintf("No memory size - using 64 MB (fix me!)\n");
724 				uint32 memSize = 64 * 1024 * 1024;
725 				gKernelArgs.physical_memory_range[1].size = memSize - 0x100000;
726 			} else {
727 				dprintf("Get Extended Memory Size succeeded.\n");
728 				gKernelArgs.physical_memory_range[1].size = regs.eax * 1024;
729 			}
730 			gKernelArgs.num_physical_memory_ranges = 2;
731 		} else {
732 			dprintf("Get Memory Size for Large Configurations succeeded.\n");
733 			gKernelArgs.physical_memory_range[1].size = regs.ecx * 1024;
734 			gKernelArgs.physical_memory_range[2].start = 0x1000000;
735 			gKernelArgs.physical_memory_range[2].size = regs.edx * 64 * 1024;
736 			gKernelArgs.num_physical_memory_ranges = 3;
737 		}
738 	}
739 
740 	gKernelArgs.arch_args.page_hole = 0xffc00000;
741 }
742 
743 
744 //	#pragma mark -
745 
746 
747 extern "C" status_t
748 platform_allocate_region(void **_address, size_t size, uint8 protection,
749 	bool /*exactAddress*/)
750 {
751 	void *address = mmu_allocate(*_address, size);
752 	if (address == NULL)
753 		return B_NO_MEMORY;
754 
755 	*_address = address;
756 	return B_OK;
757 }
758 
759 
760 extern "C" status_t
761 platform_free_region(void *address, size_t size)
762 {
763 	mmu_free(address, size);
764 	return B_OK;
765 }
766 
767 
768 void
769 platform_release_heap(struct stage2_args *args, void *base)
770 {
771 	// It will be freed automatically, since it is in the
772 	// identity mapped region, and not stored in the kernel's
773 	// page tables.
774 }
775 
776 
777 status_t
778 platform_init_heap(struct stage2_args *args, void **_base, void **_top)
779 {
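	// The heap is allocated from the low physical memory that
	// init_page_directory() identity maps, so its physical address can be
	// used as a pointer directly (cf. platform_release_heap() above).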
780 	void *heap = (void *)get_next_physical_address(args->heap_size);
781 	if (heap == NULL)
782 		return B_NO_MEMORY;
783 
784 	*_base = heap;
785 	*_top = (void *)((int8 *)heap + args->heap_size);
786 	return B_OK;
787 }
788 
789 
790