xref: /haiku/src/system/boot/platform/bios_ia32/mmu.cpp (revision 4b3b81da9e459443d75329cfd08bc9a57ad02653)
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"
#include "bios.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


/*!	The (physical) memory layout of the boot loader is currently as follows:
	  0x0500 - 0x10000	protected mode stack
	  0x0500 - 0x09000	real mode stack
	 0x10000 - ?		code (up to ~500 kB)
	 0x90000			1st temporary page table (identity maps 0-4 MB)
	 0x91000			2nd (4-8 MB)
	 0x92000 - 0x9e000	further page tables
	 0x9e000 - 0xa0000	SMP trampoline code
	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
	0x100000			page directory
	     ...			boot loader heap (32 kB)
	     ...			free physical memory

	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
	on. The kernel is mapped at 0x80000000; everything else mapped by the
	loader (kernel args, modules, driver settings, ...) comes after
	0x80200000, which means that there is currently only 2 MB reserved for
	the kernel itself (see kMaxKernelSize).

	The layout in PXE mode differs a bit from this, see definitions below.
*/

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

struct gdt_idt_descr {
	uint16 limit;
	uint32 *base;
} _PACKED;

// memory structure returned by int 0x15, ax 0xe820
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};


static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x200000;		// 2 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

#ifdef _PXE_ENV

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif


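/*!	Reserves \a size bytes of the loader's virtual address space and
	returns the start of the reserved range. No pages are mapped yet.
*/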
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


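/*!	Reserves \a size bytes of physical memory from the simple bump
	allocator and returns its start address.
*/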
static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	return address;
}


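/*!	Reserves and returns the next free virtual page address. */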
static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


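/*!	Reserves and returns the next free physical page. */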
static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


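/*!	Returns the address to use for the next page table. Page tables are
	taken from the dedicated low memory region below the SMP trampoline
	first; once that region is exhausted, ordinary physical pages are used.
*/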
static uint32 *
get_next_page_table()
{
	TRACE(("get_next_page_table, sNextPageTableAddress %p, kPageTableRegionEnd "
		"%p\n", sNextPageTableAddress, kPageTableRegionEnd));

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}


/*!	Adds a new page table for the specified base address */
static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
		panic("tried to add page table beyond the identity mapped 8 MB "
			"region\n");
	}

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++]
		= (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base / (4 * 1024 * 1024)]
		= (uint32)pageTable | kDefaultPageTableFlags;
}


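/*!	Removes the mapping for the given (kernel space) virtual address and
	invalidates its TLB entry.
*/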
static void
unmap_page(addr_t virtualAddress)
{
	TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("unmap_page: asked to unmap invalid page %p!\n",
			(void *)virtualAddress);
	}

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}


/*!	Creates an entry to map the specified virtualAddress to the given
	physicalAddress. If the mapping goes beyond the current page table,
	it will allocate a new one. If it cannot map the requested page, it
	panics.
*/
static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
		physicalAddress));

	if (virtualAddress < KERNEL_BASE) {
		panic("map_page: asked to map invalid page %p!\n",
			(void *)virtualAddress);
	}

	if (virtualAddress >= sMaxVirtualAddress) {
		// we need to add a new page table

		add_page_table(sMaxVirtualAddress);
		sMaxVirtualAddress += B_PAGE_SIZE * 1024;

		if (virtualAddress >= sMaxVirtualAddress) {
			panic("map_page: asked to map a page to %p\n",
				(void *)virtualAddress);
		}
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;

	TRACE(("map_page: inserting pageTable %p, tableEntry %ld, physicalAddress "
		"%p\n", pageTable, tableEntry, physicalAddress));

	pageTable[tableEntry] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));

	TRACE(("map_page: done\n"));
}


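/*!	Sorts the given address ranges by their start address (a simple
	bubble sort; the arrays involved are small).
*/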
static void
sort_addr_range(addr_range *range, int count)
{
	addr_range tempRange;
	bool done;
	int i;

	do {
		done = true;
		for (i = 1; i < count; i++) {
			if (range[i].start < range[i - 1].start) {
				done = false;
				memcpy(&tempRange, &range[i], sizeof(addr_range));
				memcpy(&range[i], &range[i - 1], sizeof(addr_range));
				memcpy(&range[i - 1], &tempRange, sizeof(addr_range));
			}
		}
	} while (!done);
}


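/*!	Queries the BIOS memory map via int 0x15, function 0xe820, storing the
	returned descriptors in the extra segment scratch buffer. Returns the
	number of entries retrieved, or 0 on failure.
*/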
static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = {0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE(("get_memory_map()\n"));

	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if (regs.flags & CARRY_FLAG)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MMU
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%Lx, len 0x%Lx, type %lu\n",
			block[i].base_addr, block[i].length, block[i].type);
	}
#endif

	return count;
}


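/*!	Allocates and fills the boot page directory: identity maps the first
	8 MB of physical memory with two temporary page tables, adds the first
	kernel page table at KERNEL_BASE, and turns on paging.
*/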
static void
init_page_directory(void)
{
	TRACE(("init_page_directory\n"));

	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that their
	// physical and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;
	add_page_table(KERNEL_BASE);

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Important.  Make sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}


//	#pragma mark -


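/*!	Maps the given physical memory range into the loader's virtual address
	space and returns the virtual address that corresponds to
	\a physicalAddress (including its offset within the first page).
*/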
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;
		// take the offset into account, so that the last page is
		// mapped as well if the physical address is not page aligned

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}


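/*!	Allocates and maps \a size bytes of memory. If \a virtualAddress is
	NULL, the pages are mapped at the next free virtual address; otherwise
	they are mapped at the requested address, which must lie within the
	kernel's kMaxKernelSize region.
*/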
extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: "
		"%ld\n", virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 'kMaxKernelSize' bytes following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		// (note that "size" has already been converted to a page count)
		if (address < KERNEL_BASE || address + size * B_PAGE_SIZE
				>= KERNEL_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(),
			kDefaultPageFlags);
	}

	return address;
}


/*!	This will unmap the allocated chunk of memory from the virtual
	address space. It does not free the underlying physical memory; the
	virtual address range is only reclaimed if it was the most recently
	allocated one.
*/
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	size_t pageCount = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to unmap
	size = pageCount * B_PAGE_SIZE;
		// ... and the page aligned size of the region

	// is the address within the valid range?
	if (address < KERNEL_BASE
		|| address + size >= KERNEL_BASE + kMaxKernelSize) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (uint32 i = 0; i < pageCount; i++) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}


/*!	Sets up the final and kernel accessible GDT and IDT tables.
	BIOS calls won't work any longer after this function has
	been called.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE(("idt at %p\n", idt));

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// clear it out
		uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			virtualIDT[i] = 0;
		}

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE(("gdt at %p\n", gdt));

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE(("gdt at virtual address %p\n",
			(void *)gKernelArgs.arch_args.vir_gdt));
	}

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size
		= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;

	// Save the memory we've virtually allocated (for the kernel and other
	// stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size
		= sNextVirtualAddress - KERNEL_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_addr_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_addr_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_addr_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MMU
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


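/*!	Sets up paging for the boot loader: creates the page directory, maps a
	kernel stack, and builds the physical memory ranges in the kernel args
	from the BIOS memory map (falling back to a fixed 32 MB if the BIOS
	call fails).
*/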
extern "C" void
mmu_init(void)
{
	TRACE(("mmu_init\n"));

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff.
	// This enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory,
		kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
		KERNEL_STACK_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE;

	TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				// round everything up to page boundaries, exclusive of pages
				// it partially occupies
				if ((extMemoryBlock[i].base_addr % B_PAGE_SIZE) != 0) {
					extMemoryBlock[i].length -= B_PAGE_SIZE
						- extMemoryBlock[i].base_addr % B_PAGE_SIZE;
				}
				extMemoryBlock[i].base_addr
					= ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
				extMemoryBlock[i].length
					= ROUNDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);

				// we ignore all memory beyond 4 GB
				if (extMemoryBlock[i].base_addr > 0xffffffffULL)
					continue;

				if (extMemoryBlock[i].base_addr + extMemoryBlock[i].length
						> 0xffffffffULL) {
					extMemoryBlock[i].length
						= 0x100000000ULL - extMemoryBlock[i].base_addr;
				}

				if (gKernelArgs.num_physical_memory_ranges > 0) {
					// see if we can fill a small hole between this range
					// and the previous one
					addr_t previousEnd = gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].start
						+ gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size;
					addr_t holeSize = extMemoryBlock[i].base_addr - previousEnd;

					// If the hole is smaller than 1 MB, we try to mark the
					// memory as allocated and extend the previous memory range
					if (previousEnd <= extMemoryBlock[i].base_addr
						&& holeSize < 0x100000
						&& insert_physical_allocated_range(previousEnd,
							extMemoryBlock[i].base_addr - previousEnd)
								== B_OK) {
						gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size
								+= holeSize;
					}
				}

				insert_physical_memory_range(extMemoryBlock[i].base_addr,
					extMemoryBlock[i].length);
			}
		}
	} else {
		// TODO: for now!
		dprintf("No extended memory block - using 32 MB (fix me!)\n");
		uint32 memSize = 32 * 1024 * 1024;

		// We don't have an extended map, assume memory is contiguously mapped
		// at 0x0
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = memSize;
		gKernelArgs.num_physical_memory_ranges = 1;

		// mark the bios area allocated
		uint32 biosRange = gKernelArgs.num_physical_allocated_ranges++;

		gKernelArgs.physical_allocated_range[biosRange].start = 0x9f000;
			// 640k - 1 page
		gKernelArgs.physical_allocated_range[biosRange].size = 0x61000;
	}

	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


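/*!	Platform hook used by the generic loader code to allocate a memory
	region; the protection and exactAddress arguments are currently
	ignored.
*/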
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


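/*!	Platform hook to release a region previously allocated with
	platform_allocate_region().
*/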
extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


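/*!	Carves the boot loader heap out of the identity mapped physical
	memory, so it can be used directly without any further mapping.
*/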
status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}