xref: /haiku/src/system/boot/platform/bios_ia32/mmu.cpp (revision a629567a9001547736cfe892cdf992be16868fed)
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"

#include <string.h>

#include <OS.h>

#include <arch/cpu.h>
#include <arch/x86/descriptors.h>
#include <arch_kernel.h>
#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <kernel.h>

#include "bios.h"
#include "interrupts.h"


/*!	The (physical) memory layout of the boot loader is currently as follows:
	  0x0500 - 0x10000	protected mode stack
	  0x0500 - 0x09000	real mode stack
	 0x10000 - ?		code (up to ~500 kB)
	 0x90000			1st temporary page table (identity maps 0-4 MB)
	 0x91000			2nd (4-8 MB)
	 0x92000 - 0x9e000	further page tables
	 0x9e000 - 0xa0000	SMP trampoline code
	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
	0x100000			page directory
	     ...			boot loader heap (32 kB)
	     ...			free physical memory

	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
	on. The kernel is mapped at 0x80000000; everything else mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x81000000, which means that currently 16 MB are reserved for the
	kernel itself (see kMaxKernelSize).

	The layout in PXE mode differs a bit from this, see definitions below.
*/

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define TRACE_MEMORY_MAP
	// Define this to print the memory map to the serial debug output.
	// You also need to define ENABLE_SERIAL in serial.cpp for the
	// output to work.


// memory structure returned by int 0x15, ax 0xe820
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};
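
// Note: the three fields above add up to the 20-byte entry format that the
// BIOS writes to ES:DI for each int 0x15, EAX 0xe820 call; get_memory_map()
// below initializes the BIOS register block with sizeof(extended_memory),
// presumably as the ECX buffer size that call expects.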


segment_descriptor gBootGDT[BOOT_GDT_SEGMENT_COUNT];

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x1000000;		// 16 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

#ifdef _PXE_ENV

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif
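
// A rough sketch of how the PXE values above differ from the plain BIOS case:
// the page table window sits at 0x7d000 - 0x8b000 instead of 0x90000 - 0x9e000
// (presumably because the PXE image occupies part of that area of conventional
// memory), and the first physical allocations start at 0x112000 instead of
// 0x100000. The virtual side (KERNEL_LOAD_BASE + kMaxKernelSize) is identical
// in both cases.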


static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	uint64 base;
	if (!get_free_address_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, sNextPhysicalAddress,
			size, &base)) {
		panic("Out of physical memory!");
		return 0;
	}

	insert_physical_allocated_range(base, size);
	sNextPhysicalAddress = base + size;
		// TODO: Can overflow theoretically.

	return base;
}


static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


static uint32 *
get_next_page_table()
{
	TRACE("get_next_page_table, sNextPageTableAddress %#" B_PRIxADDR
		", kPageTableRegionEnd %#" B_PRIxADDR "\n", sNextPageTableAddress,
		kPageTableRegionEnd);

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}
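
// Once the fixed window above is exhausted, page tables come from
// get_next_physical_page(); add_page_table() below still insists that they
// lie within the identity mapped first 8 MB, since they are written to
// directly through their physical address.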


/*!	Adds a new page table for the specified base address */
static uint32*
add_page_table(addr_t base)
{
	base = ROUNDDOWN(base, B_PAGE_SIZE * 1024);

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
		panic("tried to add page table beyond the identity mapped 8 MB "
			"region\n");
		return NULL;
	}

	TRACE("add_page_table(base = %p), got page: %p\n", (void*)base, pageTable);

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++]
		= (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base / (4 * 1024 * 1024)]
		= (uint32)pageTable | kDefaultPageTableFlags;

	// update the virtual end address in the kernel args
	base += B_PAGE_SIZE * 1024;
	if (base > gKernelArgs.arch_args.virtual_end)
		gKernelArgs.arch_args.virtual_end = base;

	return pageTable;
}
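
// Illustrative example (assuming KERNEL_LOAD_BASE is 0x80000000, as in the
// layout comment at the top of this file): add_page_table(0x80000000) rounds
// the base down to a 4 MB boundary (it already is one), stores the new table
// in sPageDirectory[0x80000000 / 0x400000] = sPageDirectory[512], and bumps
// gKernelArgs.arch_args.virtual_end to at least 0x80400000.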


static void
unmap_page(addr_t virtualAddress)
{
	TRACE("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress);

	if (virtualAddress < KERNEL_LOAD_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}


/*!	Creates an entry to map the specified virtualAddress to the given
	physicalAddress.
	If the mapping goes beyond the current page table, it will allocate
	a new one. If it cannot map the requested page, it panics.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress);

	if (virtualAddress < KERNEL_LOAD_BASE) {
		panic("map_page: asked to map invalid page %p!\n",
			(void *)virtualAddress);
	}

	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);

	if (pageTable == NULL) {
		// we need to add a new page table
		pageTable = add_page_table(virtualAddress);

		if (pageTable == NULL) {
			panic("map_page: failed to allocate a page table for virtual "
				"address %p\n", (void*)virtualAddress);
			return;
		}
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;

	TRACE("map_page: inserting pageTable %p, tableEntry %" B_PRIu32
		", physicalAddress %#" B_PRIxADDR "\n", pageTable, tableEntry,
		physicalAddress);

	pageTable[tableEntry] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));

	TRACE("map_page: done\n");
}
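
// Worked example of the index math used in map_page()/unmap_page() above:
// for virtualAddress 0x80001234, the page directory slot is
// 0x80001234 / (B_PAGE_SIZE * 1024) = 0x80001234 / 0x400000 = 512, the table
// entry is (0x80001234 % 0x400000) / B_PAGE_SIZE = 0x1234 / 0x1000 = 1, and
// the low 12 bits (0x234) are the offset within the page, which is why the
// physical address is masked with ~(B_PAGE_SIZE - 1) before being entered.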


#ifdef TRACE_MEMORY_MAP
static const char *
e820_memory_type(uint32 type)
{
	switch (type) {
		case 1: return "memory";
		case 2: return "reserved";
		case 3: return "ACPI reclaim";
		case 4: return "ACPI NVS";
		default: return "unknown/reserved";
	}
}
#endif


static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = {0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE("get_memory_map()\n");

	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if ((regs.flags & CARRY_FLAG) != 0)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MEMORY_MAP
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%08Lx, len 0x%08Lx, type %lu (%s)\n",
			block[i].base_addr, block[i].length,
			block[i].type, e820_memory_type(block[i].type));
	}
#endif

	return count;
}
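
// For reference, the int 0x15, EAX 0xe820 convention used above: 'SMAP' in EDX
// is the required signature, EBX carries a continuation value that the BIOS
// sets to 0 after the last entry (hence the loop condition), and a set carry
// flag signals that the call is unsupported or failed, in which case we report
// zero entries and mmu_init() falls back to the older BIOS memory calls.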


static void
init_page_directory(void)
{
	TRACE("init_page_directory\n");

	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that their physical
	// and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Important: make sure supervisor threads can fault on read-only pages...
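	// The value written to CR0 below sets PE (bit 0), NE (bit 5),
	// WP (bit 16) and PG (bit 31); WP is what makes ring 0 fault on
	// read-only pages, as noted above.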
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}


//	#pragma mark -


/*!
	Neither \a virtualAddress nor \a size need to be aligned, but the function
	will map all pages the range intersects with.
	If physicalAddress is not page-aligned, the returned virtual address will
	have the same "misalignment".
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}
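
// Example of the offset handling above (illustrative numbers): mapping
// physical 0x12345678 with size 0x100 keeps pageOffset 0x678, maps the single
// page at 0x12345000 to the next free virtual page, and returns that virtual
// page's address plus 0x678, so the caller sees the same misalignment.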


extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size);

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_LOAD_BASE || address + size * B_PAGE_SIZE
			>= KERNEL_LOAD_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(),
			kDefaultPageFlags);
	}

	return address;
}
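
// Usage note: platform_allocate_region() at the end of this file passes the
// caller's requested address straight through, so both paths above are
// reachable from generic boot loader code; the non-NULL path is essentially
// reserved for placing the kernel image within the kMaxKernelSize window.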


/*!	Allocates a single page and returns both its virtual and physical
	addresses.
*/
void *
mmu_allocate_page(addr_t *_physicalAddress)
{
	addr_t virt = get_next_virtual_page();
	addr_t phys = get_next_physical_page();

	map_page(virt, phys, kDefaultPageFlags);

	if (_physicalAddress)
		*_physicalAddress = phys;

	return (void *)virt;
}


/*!	Allocates the given physical range.
	\return \c true, if the range could be allocated, \c false otherwise.
*/
bool
mmu_allocate_physical(addr_t base, size_t size)
{
	// check whether the physical memory range exists at all
	if (!is_address_range_covered(gKernelArgs.physical_memory_range,
			gKernelArgs.num_physical_memory_ranges, base, size)) {
		return false;
	}

	// check whether the physical range is still free
	uint64 foundBase;
	if (!get_free_address_range(gKernelArgs.physical_allocated_range,
			gKernelArgs.num_physical_allocated_ranges, base, size, &foundBase)
		|| foundBase != base) {
		return false;
	}

	return insert_physical_allocated_range(base, size) == B_OK;
}


/*!	Unmaps the given chunk of memory from the virtual address space. Being
	very simple, the implementation does not necessarily release the
	underlying memory, but it may reuse the virtual address range.
	Neither \a virtualAddress nor \a size need to be aligned, but the function
	will unmap all pages the range intersects with.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size);

	addr_t address = (addr_t)virtualAddress;
	addr_t pageOffset = address % B_PAGE_SIZE;
	address -= pageOffset;
	size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (size_t i = 0; i < size; i += B_PAGE_SIZE) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}
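
// Note on the reuse check above: virtual address space is only handed back
// when the freed range ends exactly at sNextVirtualAddress, i.e. when regions
// are freed in reverse order of allocation. Anything else stays unmapped but
// allocated until the kernel VM takes over.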


size_t
mmu_get_virtual_usage()
{
	return sNextVirtualAddress - KERNEL_LOAD_BASE;
}


bool
mmu_get_virtual_mapping(addr_t virtualAddress, addr_t *_physicalAddress)
{
	if (virtualAddress < KERNEL_LOAD_BASE) {
		panic("mmu_get_virtual_mapping: asked to lookup invalid page %p!\n",
			(void *)virtualAddress);
	}

	uint32 dirEntry = sPageDirectory[virtualAddress / (B_PAGE_SIZE * 1024)];
	if ((dirEntry & (1 << 0)) == 0)
		return false;

	uint32 *pageTable = (uint32 *)(dirEntry & 0xfffff000);
	uint32 tableEntry = pageTable[(virtualAddress % (B_PAGE_SIZE * 1024))
		/ B_PAGE_SIZE];
	if ((tableEntry & (1 << 0)) == 0)
		return false;

	*_physicalAddress = tableEntry & 0xfffff000;
	return true;
}


/*!	Sets up the final and kernel accessible GDT and IDT tables.
	BIOS calls won't work any longer after this function has
	been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE("mmu_init_for_kernel\n");

	// set up a new gdt

	// put standard segment descriptors in GDT
	clear_segment_descriptor(&gBootGDT[0]);

	// seg 0x08 - kernel 4GB code
	set_segment_descriptor(&gBootGDT[KERNEL_CODE_SEGMENT], 0, 0xffffffff,
		DT_CODE_READABLE, DPL_KERNEL);

	// seg 0x10 - kernel 4GB data
	set_segment_descriptor(&gBootGDT[KERNEL_DATA_SEGMENT], 0, 0xffffffff,
		DT_DATA_WRITEABLE, DPL_KERNEL);

	// seg 0x1b - ring 3 user 4GB code
	set_segment_descriptor(&gBootGDT[USER_CODE_SEGMENT], 0, 0xffffffff,
		DT_CODE_READABLE, DPL_USER);

	// seg 0x23 - ring 3 user 4GB data
	set_segment_descriptor(&gBootGDT[USER_DATA_SEGMENT], 0, 0xffffffff,
		DT_DATA_WRITEABLE, DPL_USER);
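
	// The selector values in the comments above follow from index * 8 | RPL,
	// assuming the segment indices the selectors imply (kernel code/data at
	// 1 and 2, user code/data at 3 and 4 in descriptors.h): for example,
	// user code gives 0x18 | 3 = 0x1b and user data gives 0x20 | 3 = 0x23.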

	// load the GDT
	struct gdt_idt_descr gdtDescriptor;
	gdtDescriptor.limit = sizeof(gBootGDT) - 1;
		// the architectural limit field holds the table size minus one
	gdtDescriptor.base = gBootGDT;

	asm("lgdt %0" : : "m" (gdtDescriptor));

	TRACE("gdt at virtual address %p\n", gBootGDT);

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_LOAD_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_address_ranges(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_address_ranges(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


extern "C" void
mmu_init(void)
{
	TRACE("mmu_init\n");

	gKernelArgs.arch_args.virtual_end = KERNEL_LOAD_BASE;

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff;
	// this enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;
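	// With this entry in place, page table i is visible at virtual address
	// 0xffc00000 + i * B_PAGE_SIZE, and the page directory itself shows up
	// at 0xfffff000, since the directory doubles as the page table for that
	// last 4 MB slot. This matches the page_hole value set at the end of
	// this function.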

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory,
		kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size);

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				uint64 base = extMemoryBlock[i].base_addr;
				uint64 length = extMemoryBlock[i].length;
				uint64 end = base + length;

				// Round the range to page boundaries, excluding pages it only
				// partially occupies: base is rounded up, end is rounded down.
				base = ROUNDUP(base, B_PAGE_SIZE);
				end = ROUNDDOWN(end, B_PAGE_SIZE);

				// We ignore all memory beyond 4 GB if phys_addr_t is only
				// 32 bits wide.
				#if B_HAIKU_PHYSICAL_BITS == 32
					if (end > 0x100000000ULL)
						end = 0x100000000ULL;
				#endif

				// Also ignore memory below 1 MB. Apparently some BIOSes fail to
				// provide the correct range type for some ranges (cf. #1925).
				// Later in the kernel we will reserve the range 0x0 - 0xa0000,
				// and apparently 0xa0000 - 0x100000 never contains usable
				// memory, so we don't lose anything by doing that.
				if (base < 0x100000)
					base = 0x100000;

				gKernelArgs.ignored_physical_memory
					+= length - (max_c(end, base) - base);
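				// Illustrative example of the accounting above: a range with
				// base 0x100800 and length 0x2800 has end 0x103000; rounding
				// gives base 0x101000 and end 0x103000, so 0x2000 bytes stay
				// usable and 0x800 bytes go into ignored_physical_memory.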

				if (end <= base)
					continue;

				status_t status = insert_physical_memory_range(base, end - base);
				if (status == B_ENTRY_NOT_FOUND) {
					panic("mmu_init(): Failed to add physical memory range "
						"%#" B_PRIx64 " - %#" B_PRIx64 " : all %d entries are "
						"used already!\n", base, end, MAX_PHYSICAL_MEMORY_RANGE);
				} else if (status != B_OK) {
					panic("mmu_init(): Failed to add physical memory range "
						"%#" B_PRIx64 " - %#" B_PRIx64 "\n", base, end);
				}
			} else if (extMemoryBlock[i].type == 3) {
				// ACPI reclaim -- physical memory we could actually use later
				gKernelArgs.ignored_physical_memory += extMemoryBlock[i].length;
			}
		}

		// sort the ranges
		sort_address_ranges(gKernelArgs.physical_memory_range,
			gKernelArgs.num_physical_memory_ranges);

		// On some machines we get several ranges that contain only a few pages
		// (or even only one) each, which causes us to run out of MTRRs later.
		// So we remove all ranges smaller than 64 KB, hoping that this will
		// leave us only with a few larger contiguous ranges (ideally one).
		for (int32 i = gKernelArgs.num_physical_memory_ranges - 1; i >= 0;
				i--) {
			uint64 size = gKernelArgs.physical_memory_range[i].size;
			if (size < 64 * 1024) {
				uint64 start = gKernelArgs.physical_memory_range[i].start;
				remove_address_range(gKernelArgs.physical_memory_range,
					&gKernelArgs.num_physical_memory_ranges,
					MAX_PHYSICAL_MEMORY_RANGE, start, size);
			}
		}
	} else {
		bios_regs regs;

		// We don't have an extended memory map; assume memory is contiguously
		// mapped at 0x0, but leave out the BIOS range ((640 kB - 1 page) to
		// 1 MB).
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = 0x9f000;
		gKernelArgs.physical_memory_range[1].start = 0x100000;

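		// As far as the classic BIOS interfaces go: int 0x15, AX 0xe801
		// reports memory between 1 MB and 16 MB in KB (ECX here) plus memory
		// above 16 MB in 64 KB blocks (EDX here), while the older
		// int 0x15, AH 0x88 only returns the KB of contiguous extended memory
		// above 1 MB, so only the single range starting at 1 MB gets filled
		// in on that path.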
		regs.eax = 0xe801; // AX
		call_bios(0x15, &regs);
		if ((regs.flags & CARRY_FLAG) != 0) {
			regs.eax = 0x8800; // AH 88h
			call_bios(0x15, &regs);
			if ((regs.flags & CARRY_FLAG) != 0) {
				// TODO: for now!
				dprintf("No memory size - using 64 MB (fix me!)\n");
				uint32 memSize = 64 * 1024 * 1024;
				gKernelArgs.physical_memory_range[1].size = memSize - 0x100000;
			} else {
				dprintf("Get Extended Memory Size succeeded.\n");
				gKernelArgs.physical_memory_range[1].size = regs.eax * 1024;
			}
			gKernelArgs.num_physical_memory_ranges = 2;
		} else {
			dprintf("Get Memory Size for Large Configurations succeeded.\n");
			gKernelArgs.physical_memory_range[1].size = regs.ecx * 1024;
			gKernelArgs.physical_memory_range[2].start = 0x1000000;
			gKernelArgs.physical_memory_range[2].size = regs.edx * 64 * 1024;
			gKernelArgs.num_physical_memory_ranges = 3;
		}
	}

	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}
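
// Note: the heap is carved directly out of physical memory and accessed
// through the identity mapping (the layout comment at the top of the file
// places it right after the page directory near 0x100000), which is also why
// platform_release_heap() above doesn't have to undo any kernel mappings.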
805