xref: /haiku/src/system/boot/platform/bios_ia32/mmu.cpp (revision f75a7bf508f3156d63a14f8fd77c5e0ca4d08c42)
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"
#include "bios.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


/*!	The (physical) memory layout of the boot loader is currently as follows:
	  0x0500 - 0x10000	protected mode stack
	  0x0500 - 0x09000	real mode stack
	 0x10000 - ?		code (up to ~500 kB)
	 0x90000			1st temporary page table (identity maps 0-4 MB)
	 0x91000			2nd (4-8 MB)
	 0x92000 - 0x9e000	further page tables
	 0x9e000 - 0xa0000	SMP trampoline code
	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
	0x100000			page directory
	     ...			boot loader heap (32 kB)
	     ...			free physical memory

	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
	on. The kernel is mapped at 0x80000000; all other stuff mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x80200000, which means that there is currently only 2 MB reserved for
	the kernel itself (see kMaxKernelSize).

	The layout in PXE mode differs a bit from this, see definitions below.
*/

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


//#define TRACE_MEMORY_MAP
	// Define this to print the memory map to the serial debug output.
	// You also need to define ENABLE_SERIAL in serial.cpp for the
	// output to work.


struct gdt_idt_descr {
	uint16 limit;
	uint32 *base;
} _PACKED;

// memory structure returned by int 0x15, ax 0xe820
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};


static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x200000;		// 2 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

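// The loader hands out physical memory, virtual address space, and page
// tables by simply advancing the counters below; the start values differ
// between the PXE and the plain BIOS environment (see the layout above).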
#ifdef _PXE_ENV

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif


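/*!	Reserves the next chunk of so far unused virtual address space of the
	given size and returns its start address.
*/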
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	return address;
}


static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


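/*!	Returns the next free page to be used as a page table. Pages are taken
	from the dedicated page table region below kPageTableRegionEnd first;
	once that region is used up, ordinary physical pages are handed out
	instead.
*/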
static uint32 *
get_next_page_table()
{
	TRACE(("get_next_page_table, sNextPageTableAddress %p, kPageTableRegionEnd "
		"%p\n", sNextPageTableAddress, kPageTableRegionEnd));

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}


/*!	Adds a new page table for the specified base address */
static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
		panic("tried to add page table beyond the identity mapped 8 MB "
			"region\n");
	}

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++]
		= (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base / (4 * 1024 * 1024)]
		= (uint32)pageTable | kDefaultPageTableFlags;
}


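/*!	Removes the mapping of the given (kernel) virtual address from its page
	table and invalidates the corresponding TLB entry.
*/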
static void
unmap_page(addr_t virtualAddress)
{
	TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}


/*!	Creates an entry to map the specified virtualAddress to the given
	physicalAddress.
	If the mapping goes beyond the current page table, it will allocate
	a new one. If it cannot map the requested page, it panics.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("map_page: asked to map invalid page %p!\n",
			(void *)virtualAddress);
	}

	if (virtualAddress >= sMaxVirtualAddress) {
		// we need to add a new page table

		add_page_table(sMaxVirtualAddress);
		sMaxVirtualAddress += B_PAGE_SIZE * 1024;

		if (virtualAddress >= sMaxVirtualAddress) {
			panic("map_page: asked to map a page to %p\n",
				(void *)virtualAddress);
		}
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;

	TRACE(("map_page: inserting pageTable %p, tableEntry %ld, physicalAddress "
		"%p\n", pageTable, tableEntry, physicalAddress));

	pageTable[tableEntry] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));

	TRACE(("map_page: done\n"));
}


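/*!	Sorts the given array of address ranges by start address, using a simple
	bubble sort.
*/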
static void
sort_addr_range(addr_range *range, int count)
{
	addr_range tempRange;
	bool done;
	int i;

	do {
		done = true;
		for (i = 1; i < count; i++) {
			if (range[i].start < range[i - 1].start) {
				done = false;
				memcpy(&tempRange, &range[i], sizeof(addr_range));
				memcpy(&range[i], &range[i - 1], sizeof(addr_range));
				memcpy(&range[i - 1], &tempRange, sizeof(addr_range));
			}
		}
	} while (!done);
}


#ifdef TRACE_MEMORY_MAP
static const char *
e820_memory_type(uint32 type)
{
	switch (type) {
		case 1: return "memory";
		case 2: return "reserved";
		case 3: return "ACPI reclaim";
		case 4: return "ACPI NVS";
		default: return "unknown/reserved";
	}
}
#endif


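/*!	Queries the BIOS for the physical memory map (int 0x15, function 0xe820)
	and stores the returned entries at kExtraSegmentScratch. Returns the
	number of entries, or 0 if the call failed.
*/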
static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = {0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE(("get_memory_map()\n"));

	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if (regs.flags & CARRY_FLAG)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MEMORY_MAP
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%08Lx, len 0x%08Lx, type %lu (%s)\n",
			block[i].base_addr, block[i].length,
			block[i].type, e820_memory_type(block[i].type));
	}
#endif

	return count;
}


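/*!	Allocates the boot page directory, identity maps the first 8 MB of
	physical memory, adds the first page table for the kernel at KERNEL_BASE,
	and finally enables paging.
*/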
static void
init_page_directory(void)
{
	TRACE(("init_page_directory\n"));

	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that their
	// physical and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;
	add_page_table(KERNEL_BASE);

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Important.  Make sure supervisor threads can fault on read only pages...
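	// CR0 bits set below: PE (protected mode), NE (native FPU error
	// reporting), WP (write protect also in ring 0), and PG (paging).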
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}


//	#pragma mark -


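/*!	Maps the given physical address range into the loader's virtual address
	space and returns the virtual address corresponding to
	\a physicalAddress (the offset within the first page is preserved).
*/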
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}


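/*!	Allocates and maps \a size bytes of physical memory. If \a virtualAddress
	is NULL, the memory is mapped at the next free virtual addresses;
	otherwise it is mapped at the requested address, which must lie within
	the kernel's reserved range (see kMaxKernelSize). Returns the virtual
	address of the allocation, or NULL on failure.
*/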
extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is mainly useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_BASE
			|| address + size * B_PAGE_SIZE >= KERNEL_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(),
			kDefaultPageFlags);
	}

	return address;
}


/*!	Unmaps the given chunk of memory from the virtual address space.
	Since the implementation is very simple, the physical pages are not
	freed; the virtual address range is only reused if it was the last one
	allocated.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to unmap

	// is the address within the valid range?
	if (address < KERNEL_BASE
		|| address + size * B_PAGE_SIZE >= KERNEL_BASE + kMaxKernelSize) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (uint32 i = 0; i < size; i++) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size * B_PAGE_SIZE;
	}
}


/*!	Sets up the final and kernel accessible GDT and IDT tables.
	BIOS calls won't work any longer after this function has
	been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE(("idt at %p\n", idt));

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// clear it out
		uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			virtualIDT[i] = 0;
		}

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE(("idt at virtual address 0x%lx\n",
			gKernelArgs.arch_args.vir_idt));
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE(("gdt at %p\n", gdt));

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE(("gdt at virtual address %p\n",
			(void *)gKernelArgs.arch_args.vir_gdt));
	}

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size
		= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_addr_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_addr_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_addr_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MEMORY_MAP
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


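/*!	Sets up the MMU environment of the boot loader: creates the page
	directory, turns on paging, maps the page directory recursively at
	0xffc00000, allocates the first kernel stack, and builds the physical
	memory ranges from the BIOS memory map.
*/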
extern "C" void
mmu_init(void)
{
	TRACE(("mmu_init\n"));

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff.
	// This enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory,
		kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				// round everything up to page boundaries, exclusive of pages
				// it partially occupies
				if ((extMemoryBlock[i].base_addr % B_PAGE_SIZE) != 0) {
					extMemoryBlock[i].length -= B_PAGE_SIZE
						- extMemoryBlock[i].base_addr % B_PAGE_SIZE;
				}
				extMemoryBlock[i].base_addr
					= ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
				extMemoryBlock[i].length
					= ROUNDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);

				// we ignore all memory beyond 4 GB
				if (extMemoryBlock[i].base_addr > 0xffffffffULL)
					continue;

				if (extMemoryBlock[i].base_addr + extMemoryBlock[i].length
						> 0xffffffffULL) {
					extMemoryBlock[i].length
						= 0x100000000ULL - extMemoryBlock[i].base_addr;
				}

				if (gKernelArgs.num_physical_memory_ranges > 0) {
					// we might want to extend a previous hole
					addr_t previousEnd = gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].start
						+ gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size;
					addr_t holeSize = extMemoryBlock[i].base_addr - previousEnd;

					// If the hole is smaller than 1 MB, we try to mark the
					// memory as allocated and extend the previous memory range
					if (previousEnd <= extMemoryBlock[i].base_addr
						&& holeSize < 0x100000
						&& insert_physical_allocated_range(previousEnd,
							extMemoryBlock[i].base_addr - previousEnd)
								== B_OK) {
						gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size
								+= holeSize;
					}
				}

				insert_physical_memory_range(extMemoryBlock[i].base_addr,
					extMemoryBlock[i].length);
			}
		}
	} else {
		// TODO: for now!
		dprintf("No extended memory block - using 32 MB (fix me!)\n");
		uint32 memSize = 32 * 1024 * 1024;

		// We don't have an extended memory map; assume memory is contiguously
		// mapped at 0x0.
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = memSize;
		gKernelArgs.num_physical_memory_ranges = 1;

		// mark the bios area allocated
		uint32 biosRange = gKernelArgs.num_physical_allocated_ranges++;

		gKernelArgs.physical_allocated_range[biosRange].start = 0x9f000;
			// 640k - 1 page
		gKernelArgs.physical_allocated_range[biosRange].size = 0x61000;
	}

	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


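/*!	Allocates the boot loader heap from the low, identity mapped physical
	memory, so the returned physical address can be used directly.
*/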
status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}