/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"
#include "bios.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


/** The (physical) memory layout of the boot loader is currently as follows:
 *	  0x0500 - 0x10000	protected mode stack
 *	  0x0500 - 0x09000	real mode stack
 *	 0x10000 - ?		code (up to ~500 kB)
 *	 0x90000			1st temporary page table (identity maps 0-4 MB)
 *	 0x91000			2nd (4-8 MB)
 *	 0x92000 - 0x9e000	further page tables
 *	 0x9e000 - 0xa0000	SMP trampoline code
 *	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
 *	0x100000			page directory
 *	     ...			boot loader heap (32 kB)
 *	     ...			free physical memory
 *
 *	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
 *	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
 *	loader (kernel args, modules, driver settings, ...) comes after
 *	0x80100000, which means that there is currently only 1 MB reserved for
 *	the kernel itself (see kMaxKernelSize).
 */
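
// A note on the paging structures used below: on 32-bit x86, the page
// directory holds 1024 32-bit entries, each covering 4 MB of the virtual
// address space, and every page table again holds 1024 entries, each
// mapping one 4 kB page. A virtual address is thus split as follows:
//	bits 31-22: page directory index  (address / (B_PAGE_SIZE * 1024))
//	bits 21-12: page table index      ((address % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE)
//	bits 11-0:  offset within the page
// This is standard IA-32 behaviour; all the mapping helpers below rely on
// this split.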

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

struct gdt_idt_descr {
	uint16 limit;
	uint32 *base;
} _PACKED;

// memory structure returned by int 0x15, ax 0xe820
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};

#ifdef _PXE_ENV

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x100000;		// 1 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x100000;		// 1 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif


static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	return address;
}


static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


static uint32 *
get_next_page_table()
{
	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}
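
// Note that get_next_page_table() is a simple bump allocator: once the
// dedicated page table region (which ends below the SMP trampoline code) is
// used up, it falls back to general physical pages. Those must still lie
// within the identity mapped first 8 MB, which is what the check in
// add_page_table() below enforces.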


/**	Adds a new page table for the specified base address */

static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024))
		panic("tried to add page table beyond the identity mapped 8 MB region\n");

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
}


static void
unmap_page(addr_t virtualAddress)
{
	TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("unmap_page: asked to unmap invalid page %p!\n", (void *)virtualAddress);

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}
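
// The "invlpg" above is needed because paging is already enabled at this
// point: it invalidates any TLB entry for the page, so the CPU does not
// keep using the stale translation.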


/** Creates an entry to map the specified virtualAddress to the given
 *	physicalAddress.
 *	If the mapping goes beyond the current page table, it will allocate
 *	a new one. If it cannot map the requested page, it panics.
 */

static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("map_page: asked to map invalid page %p!\n", (void *)virtualAddress);

	if (virtualAddress >= sMaxVirtualAddress) {
		// we need to add a new page table

		add_page_table(sMaxVirtualAddress);
		sMaxVirtualAddress += B_PAGE_SIZE * 1024;

		if (virtualAddress >= sMaxVirtualAddress)
			panic("map_page: asked to map a page to %p\n", (void *)virtualAddress);
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}
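
// For example, mapping virtual address 0x80100000 stores the entry in page
// directory slot 0x80100000 / (B_PAGE_SIZE * 1024) = 512, at page table
// index (0x80100000 % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE = 256.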


static void
sort_addr_range(addr_range *range, int count)
{
	addr_range tempRange;
	bool done;
	int i;

	do {
		done = true;
		for (i = 1; i < count; i++) {
			if (range[i].start < range[i - 1].start) {
				done = false;
				memcpy(&tempRange, &range[i], sizeof(addr_range));
				memcpy(&range[i], &range[i - 1], sizeof(addr_range));
				memcpy(&range[i - 1], &tempRange, sizeof(addr_range));
			}
		}
	} while (!done);
}
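
// sort_addr_range() is a plain bubble sort; that is fine here, since the
// kernel args only ever contain a handful of address ranges.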


static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = { 0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE(("get_memory_map()\n"));

	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if (regs.flags & CARRY_FLAG)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MMU
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%Lx, len 0x%Lx, type %lu\n",
			block[i].base_addr, block[i].length, block[i].type);
	}
#endif

	return count;
}
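
// A quick recap of the INT 0x15, EAX 0xE820 protocol used above: EBX is a
// continuation value that starts at 0 and is updated by the BIOS on every
// call (0 again means "last entry"), 'SMAP' in EDX is the required
// signature, ECX holds the buffer size, EDI points to the buffer, and a
// set carry flag signals an error. Since the same bios_regs structure is
// reused for every iteration, the continuation value in EBX is carried
// over automatically.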


static void
init_page_directory(void)
{
	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that physical
	// and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;
	add_page_table(KERNEL_BASE);

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Set PG (paging, bit 31), WP (write protect, bit 16), NE (numeric
	// error, bit 5) and PE (protected mode, bit 0) in CR0. WP is important:
	// it makes sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}


//	#pragma mark -


extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;
		// make sure the last page is mapped even if the range does not
		// start on a page boundary

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}
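
// A typical (hypothetical) use of mmu_map_physical_memory() would be
// mapping a device range such as a VESA frame buffer: the page offset of
// the physical address is preserved, so the returned virtual address can
// be used directly even if the range does not start on a page boundary.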


extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: %ld\n",
		virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 1 MB following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		// (note that "size" is a page count at this point, hence the
		// multiplication)
		if (address < KERNEL_BASE
			|| address + size * B_PAGE_SIZE >= KERNEL_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(), kDefaultPageFlags);
	}

	return address;
}
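
// Note that neither path records which physical pages back a given
// allocation; that is why mmu_free() below can only give the virtual
// address space back when the freed range happens to be the most recently
// allocated one.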


/**	This will unmap the allocated chunk of memory from the virtual
 *	address space. It might not actually free memory (as its implementation
 *	is very simple), but it might.
 */

extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to unmap

	// is the address within the valid range?
	if (address < KERNEL_BASE
		|| address + size * B_PAGE_SIZE >= KERNEL_BASE + kMaxKernelSize) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (uint32 i = 0; i < size; i++) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size * B_PAGE_SIZE;
	}
}


/** Sets up the final and kernel-accessible GDT and IDT tables.
 *	BIOS calls won't work any longer after this function has
 *	been called.
 */

extern "C" void
mmu_init_for_kernel(void)
{
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE(("idt at %p\n", idt));

		// clear it out
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			idt[i] = 0;
		}

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE(("gdt at %p\n", gdt));

		// put standard segment descriptors in it
		clear_segment_descriptor(&gdt[0]);
		set_segment_descriptor(&gdt[1], 0, 0xffffffff, DT_CODE_READABLE, DPL_KERNEL);
			// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&gdt[2], 0, 0xffffffff, DT_DATA_WRITEABLE, DPL_KERNEL);
			// seg 0x10 - kernel 4GB data

		set_segment_descriptor(&gdt[3], 0, 0xffffffff, DT_CODE_READABLE, DPL_USER);
			// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&gdt[4], 0, 0xffffffff, DT_DATA_WRITEABLE, DPL_USER);
			// seg 0x23 - ring 3 user 4GB data
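
		// As a reminder, a segment selector is (descriptor index << 3)
		// with the requested privilege level in the low two bits: slot 1
		// gives 0x08, slot 3 with RPL 3 gives 0x18 | 3 = 0x1b, and slot 4
		// with RPL 3 gives 0x20 | 3 = 0x23 - which is where the selector
		// values in the comments above come from.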

		// gdt[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE(("gdt at virtual address %p\n", (void *)gKernelArgs.arch_args.vir_gdt));
	}

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size
		= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;

	// save the memory we've virtually allocated (for the kernel and other stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size = sNextVirtualAddress - KERNEL_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_addr_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_addr_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_addr_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MMU
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}


extern "C" void
mmu_init(void)
{
	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff.
	// This enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;
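
	// With this self-referencing entry in place, the page table that maps
	// directory slot N becomes visible at 0xffc00000 + N * B_PAGE_SIZE,
	// and the page directory itself shows up at 0xfffff000 - the classic
	// recursive page directory trick (see page_hole below).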

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory, kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL, KERNEL_STACK_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE;

	TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				// round everything up to page boundaries, exclusive of pages
				// it partially occupies
				extMemoryBlock[i].length -= (extMemoryBlock[i].base_addr % B_PAGE_SIZE)
					? (B_PAGE_SIZE - (extMemoryBlock[i].base_addr % B_PAGE_SIZE)) : 0;
				extMemoryBlock[i].base_addr = ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
				extMemoryBlock[i].length = ROUNDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);

				// we ignore all memory beyond 4 GB
				if (extMemoryBlock[i].base_addr > 0xffffffffULL)
					continue;
				if (extMemoryBlock[i].base_addr + extMemoryBlock[i].length > 0xffffffffULL)
					extMemoryBlock[i].length = 0x100000000ULL - extMemoryBlock[i].base_addr;

				if (gKernelArgs.num_physical_memory_ranges > 0) {
					// we might want to extend a previous hole
					addr_t previousEnd = gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].start
						+ gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size;
					addr_t holeSize = extMemoryBlock[i].base_addr - previousEnd;

					// if the hole is smaller than 1 MB, we try to mark the memory
					// as allocated and extend the previous memory range
					if (previousEnd <= extMemoryBlock[i].base_addr
						&& holeSize < 0x100000
						&& insert_physical_allocated_range(previousEnd,
							extMemoryBlock[i].base_addr - previousEnd) == B_OK) {
						gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size += holeSize;
					}
				}

				insert_physical_memory_range(extMemoryBlock[i].base_addr,
					extMemoryBlock[i].length);
			}
		}
	} else {
		// ToDo: for now!
		dprintf("No extended memory block - using 32 MB (fix me!)\n");
		uint32 memSize = 32 * 1024 * 1024;

		// we don't have an extended map, assume memory is contiguously
		// mapped at 0x0
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = memSize;
		gKernelArgs.num_physical_memory_ranges = 1;

		// mark the bios area allocated
		gKernelArgs.physical_allocated_range[
			gKernelArgs.num_physical_allocated_ranges].start = 0x9f000;
			// 640k - 1 page
		gKernelArgs.physical_allocated_range[
			gKernelArgs.num_physical_allocated_ranges].size = 0x61000;
		gKernelArgs.num_physical_allocated_ranges++;
	}

	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}
665