/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Based on code written by Travis Geiselbrecht for NewOS.
 *
 * Distributed under the terms of the MIT License.
 */


#include "mmu.h"
#include "bios.h"

#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <kernel.h>

#include <OS.h>

#include <string.h>


/** The (physical) memory layout of the boot loader is currently as follows:
 *	  0x0500 - 0x10000	protected mode stack
 *	  0x0500 - 0x09000	real mode stack
 *	 0x10000 - ?		code (up to ~500 kB)
 *	 0x90000			1st temporary page table (identity maps 0-4 MB)
 *	 0x91000			2nd (4-8 MB)
 *	 0x92000 - 0x9e000	further page tables
 *	 0x9e000 - 0xa0000	SMP trampoline code
 *	[0xa0000 - 0x100000	BIOS/ROM/reserved area]
 *	0x100000			page directory
 *	     ...			boot loader heap (32 kB)
 *	     ...			free physical memory
 *
 *	The first 8 MB are identity mapped (0x0 - 0x0800000); paging is turned
 *	on. The kernel is mapped at 0x80000000, all other stuff mapped by the
 *	loader (kernel args, modules, driver settings, ...) comes after
 *	0x80100000 which means that there is currently only 1 MB reserved for
 *	the kernel itself (see kMaxKernelSize).
 */
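
/*	For reference: with 32-bit (non-PAE) x86 paging, which this loader uses,
 *	a virtual address is split into a page directory index (bits 31-22), a
 *	page table index (bits 21-12) and a page offset (bits 11-0), i.e.
 *	(illustrative names only):
 *
 *		pdIndex = virtualAddress / (B_PAGE_SIZE * 1024);	// one PDE covers 4 MB
 *		ptIndex = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;
 *
 *	The low bits of a directory or table entry hold the flags; the 0x07 used
 *	below (see kDefaultPageTableFlags) means present | writable | user.
 */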

//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

struct gdt_idt_descr {
	uint16 limit;
	uint32 *base;
} _PACKED;

// memory structure returned by int 0x15, ax 0xe820
struct extended_memory {
	uint64 base_addr;
	uint64 length;
	uint32 type;
};

#ifdef _PXE_ENV

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x100000;		// 1 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
	// we need to reserve 2 pages for the SMP trampoline code

#else

static const uint32 kDefaultPageTableFlags = 0x07;	// present, user, R/W
static const size_t kMaxKernelSize = 0x100000;		// 1 MB for the kernel

// working page directory and page table
static uint32 *sPageDirectory = 0;

static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;

static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
	// we need to reserve 2 pages for the SMP trampoline code

#endif
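
// The only difference between the PXE and the non-PXE configuration is where
// the two bump allocators start: in the PXE case the physical allocator
// begins at 0x112000 and the scratch page tables live at 0x7d000 - 0x8b000,
// presumably because the ranges used in the BIOS case are occupied by the
// PXE environment and the netboot loader itself.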


static addr_t
get_next_virtual_address(size_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;

	return address;
}


static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;

	return address;
}


static addr_t
get_next_virtual_page()
{
	return get_next_virtual_address(B_PAGE_SIZE);
}


static addr_t
get_next_physical_page()
{
	return get_next_physical_address(B_PAGE_SIZE);
}


static uint32 *
get_next_page_table()
{
	TRACE(("get_next_page_table, sNextPageTableAddress %p, kPageTableRegionEnd %p\n",
		(void *)sNextPageTableAddress, (void *)kPageTableRegionEnd));

	addr_t address = sNextPageTableAddress;
	if (address >= kPageTableRegionEnd)
		return (uint32 *)get_next_physical_page();

	sNextPageTableAddress += B_PAGE_SIZE;
	return (uint32 *)address;
}


/**	Adds a new page table for the specified base address */

static void
add_page_table(addr_t base)
{
	TRACE(("add_page_table(base = %p)\n", (void *)base));

	// Get new page table and clear it out
	uint32 *pageTable = get_next_page_table();
	if (pageTable > (uint32 *)(8 * 1024 * 1024))
		panic("tried to add page table beyond the identity mapped 8 MB region\n");

	gKernelArgs.arch_args.pgtables[gKernelArgs.arch_args.num_pgtables++] = (uint32)pageTable;

	for (int32 i = 0; i < 1024; i++)
		pageTable[i] = 0;

	// put the new page table into the page directory
	sPageDirectory[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
}
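
// Note that the page tables allocated here must lie within the identity
// mapped first 8 MB (hence the panic above): once paging has been turned
// on, they are still written to through their physical address, which only
// works as long as that address is identity mapped.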


static void
unmap_page(addr_t virtualAddress)
{
	TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("unmap_page: asked to unmap invalid page %p!\n", (void *)virtualAddress);

	// unmap the page from the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));
}
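
// The "invlpg" above invalidates the TLB entry for the unmapped page, so the
// change takes effect immediately without reloading CR3.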


/** Creates an entry to map the specified virtualAddress to the given
 *	physicalAddress.
 *	If the mapping goes beyond the current page table, it will allocate
 *	a new one. If it cannot map the requested page, it panics.
 */

static void
map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));

	if (virtualAddress < KERNEL_BASE)
		panic("map_page: asked to map invalid page %p!\n", (void *)virtualAddress);

	if (virtualAddress >= sMaxVirtualAddress) {
		// we need to add a new page table

		add_page_table(sMaxVirtualAddress);
		sMaxVirtualAddress += B_PAGE_SIZE * 1024;

		if (virtualAddress >= sMaxVirtualAddress)
			panic("map_page: asked to map a page to %p\n", (void *)virtualAddress);
	}

	physicalAddress &= ~(B_PAGE_SIZE - 1);

	// map the page to the correct page table
	uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
		/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
	uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;

	TRACE(("map_page: inserting pageTable %p, tableEntry %lu, physicalAddress %p\n",
		pageTable, (unsigned long)tableEntry, (void *)physicalAddress));

	pageTable[tableEntry] = physicalAddress | flags;

	asm volatile("invlpg (%0)" : : "r" (virtualAddress));

	TRACE(("map_page: done\n"));
}
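
/*	A worked example (illustration only): mapping the kernel base address
 *	0x80000000 ends up in page directory slot 0x80000000 / 4 MB = 512, and
 *	in entry (0x80000000 % 4 MB) / 4 kB = 0 of the page table that
 *	add_page_table(KERNEL_BASE) installed there.
 */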


static void
sort_addr_range(addr_range *range, int count)
{
	addr_range tempRange;
	bool done;
	int i;

	do {
		done = true;
		for (i = 1; i < count; i++) {
			if (range[i].start < range[i - 1].start) {
				done = false;
				memcpy(&tempRange, &range[i], sizeof(addr_range));
				memcpy(&range[i], &range[i - 1], sizeof(addr_range));
				memcpy(&range[i - 1], &tempRange, sizeof(addr_range));
			}
		}
	} while (!done);
}
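
// This is a plain bubble sort; the kernel_args arrays it is used on only
// hold a handful of ranges, so the quadratic behaviour does not matter here.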


static uint32
get_memory_map(extended_memory **_extendedMemory)
{
	extended_memory *block = (extended_memory *)kExtraSegmentScratch;
	bios_regs regs = { 0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
	uint32 count = 0;

	TRACE(("get_memory_map()\n"));

	do {
		regs.eax = 0xe820;
		regs.edx = 'SMAP';

		call_bios(0x15, &regs);
		if (regs.flags & CARRY_FLAG)
			return 0;

		regs.edi += sizeof(extended_memory);
		count++;
	} while (regs.ebx != 0);

	*_extendedMemory = block;

#ifdef TRACE_MMU
	dprintf("extended memory info (from 0xe820):\n");
	for (uint32 i = 0; i < count; i++) {
		dprintf("    base 0x%Lx, len 0x%Lx, type %lu\n",
			block[i].base_addr, block[i].length, block[i].type);
	}
#endif

	return count;
}
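
/*	BIOS int 0x15, function 0xe820 background: each call returns one memory
 *	range descriptor into the buffer pointed to by es:di, with eax = 0xe820
 *	and edx = 'SMAP' as the magic signature. ebx carries the continuation
 *	value between calls and is zero after the last descriptor; a set carry
 *	flag signals an unsupported function or an error, in which case we
 *	simply report zero ranges.
 */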


static void
init_page_directory(void)
{
	TRACE(("init_page_directory\n"));

	// allocate a new pgdir
	sPageDirectory = (uint32 *)get_next_physical_page();
	gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;

	// clear out the pgdir
	for (int32 i = 0; i < 1024; i++) {
		sPageDirectory[i] = 0;
	}

	// Identity map the first 8 MB of memory so that their
	// physical and virtual addresses are the same.
	// These page tables won't be taken over into the kernel.

	// make the first page table at the first free spot
	uint32 *pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
	}

	sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;

	// make the second page table
	pageTable = get_next_page_table();

	for (int32 i = 0; i < 1024; i++) {
		pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
	}

	sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;

	gKernelArgs.arch_args.num_pgtables = 0;
	add_page_table(KERNEL_BASE);

	// switch to the new pgdir and enable paging
	asm("movl %0, %%eax;"
		"movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
	// Important.  Make sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
}
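
/*	About the two inline asm statements above: the first loads the physical
 *	address of the page directory into CR3; the second writes CR0 with
 *	(1 << 31) | (1 << 16) | (1 << 5) | 1, i.e. PG (paging enabled), WP
 *	(write-protect: supervisor code faults on read-only pages, see the
 *	comment above), NE (native FPU error reporting) and PE (protected mode,
 *	which is already set at this point).
 */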


//	#pragma mark -


extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	addr_t address = sNextVirtualAddress;
	addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);

	physicalAddress -= pageOffset;
	size += pageOffset;

	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
		map_page(get_next_virtual_page(), physicalAddress + offset, flags);
	}

	return address + pageOffset;
}
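
/*	Usage sketch (hypothetical values): mapping a device's registers at
 *	physical 0xfee00040 with a size of 0x20 maps the whole surrounding page
 *	and returns a virtual address that keeps the 0x40 offset within it.
 */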


extern "C" void *
mmu_allocate(void *virtualAddress, size_t size)
{
	TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, size: %ld\n",
		virtualAddress, sNextVirtualAddress, size));

	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to map

	if (virtualAddress != NULL) {
		// This special path is almost only useful for loading the
		// kernel into memory; it will only allow you to map the
		// 1 MB following the kernel base address.
		// Also, it won't check for already mapped addresses, so
		// you better know why you are here :)
		addr_t address = (addr_t)virtualAddress;

		// is the address within the valid range?
		if (address < KERNEL_BASE
			|| address + size * B_PAGE_SIZE > KERNEL_BASE + kMaxKernelSize)
			return NULL;

		for (uint32 i = 0; i < size; i++) {
			map_page(address, get_next_physical_page(), kDefaultPageFlags);
			address += B_PAGE_SIZE;
		}

		return virtualAddress;
	}

	void *address = (void *)sNextVirtualAddress;

	for (uint32 i = 0; i < size; i++) {
		map_page(get_next_virtual_page(), get_next_physical_page(), kDefaultPageFlags);
	}

	return address;
}


/**	This will unmap the allocated chunk of memory from the virtual
 *	address space. It might not actually free memory (as its implementation
 *	is very simple), but it might.
 */

extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
	TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, size));

	addr_t address = (addr_t)virtualAddress;
	size_t pageCount = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		// get number of pages to unmap
	size = pageCount * B_PAGE_SIZE;

	// is the address within the valid range?
	if (address < KERNEL_BASE
		|| address + size > KERNEL_BASE + kMaxKernelSize) {
		panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
			(void *)address, size);
	}

	// unmap all pages within the range
	for (uint32 i = 0; i < pageCount; i++) {
		unmap_page(address);
		address += B_PAGE_SIZE;
	}

	if (address == sNextVirtualAddress) {
		// we can actually reuse the virtual address space
		sNextVirtualAddress -= size;
	}
}
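
// Note that the virtual address space is only handed back if the freed
// region happens to end exactly at sNextVirtualAddress, i.e. if it was the
// most recent allocation; anything else just stays unmapped but reserved.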


/** Sets up the final and kernel accessible GDT and IDT tables.
 *	BIOS calls won't work any longer after this function has
 *	been called.
 */

extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));
	// set up a new idt
	{
		struct gdt_idt_descr idtDescriptor;
		uint32 *idt;

		// find a new idt
		idt = (uint32 *)get_next_physical_page();
		gKernelArgs.arch_args.phys_idt = (uint32)idt;

		TRACE(("idt at %p\n", idt));

		// map the idt into virtual space
		gKernelArgs.arch_args.vir_idt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);

		// clear it out
		uint32* virtualIDT = (uint32*)gKernelArgs.arch_args.vir_idt;
		for (int32 i = 0; i < IDT_LIMIT / 4; i++) {
			virtualIDT[i] = 0;
		}

		// load the idt
		idtDescriptor.limit = IDT_LIMIT - 1;
		idtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_idt;

		asm("lidt	%0;"
			: : "m" (idtDescriptor));

		TRACE(("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt));
	}

	// set up a new gdt
	{
		struct gdt_idt_descr gdtDescriptor;
		segment_descriptor *gdt;

		// find a new gdt
		gdt = (segment_descriptor *)get_next_physical_page();
		gKernelArgs.arch_args.phys_gdt = (uint32)gdt;

		TRACE(("gdt at %p\n", gdt));

		// map the gdt into virtual space
		gKernelArgs.arch_args.vir_gdt = (uint32)get_next_virtual_page();
		map_page(gKernelArgs.arch_args.vir_gdt, (uint32)gdt, kDefaultPageFlags);

		// put standard segment descriptors in it
		segment_descriptor* virtualGDT
			= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
		clear_segment_descriptor(&virtualGDT[0]);

		// seg 0x08 - kernel 4GB code
		set_segment_descriptor(&virtualGDT[1], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_KERNEL);

		// seg 0x10 - kernel 4GB data
		set_segment_descriptor(&virtualGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_KERNEL);

		// seg 0x1b - ring 3 user 4GB code
		set_segment_descriptor(&virtualGDT[3], 0, 0xffffffff, DT_CODE_READABLE,
			DPL_USER);

		// seg 0x23 - ring 3 user 4GB data
		set_segment_descriptor(&virtualGDT[4], 0, 0xffffffff, DT_DATA_WRITEABLE,
			DPL_USER);

		// virtualGDT[5] and above will be filled later by the kernel
		// to contain the TSS descriptors, and for TLS (one for every CPU)

		// load the GDT
		gdtDescriptor.limit = GDT_LIMIT - 1;
		gdtDescriptor.base = (uint32 *)gKernelArgs.arch_args.vir_gdt;

		asm("lgdt	%0;"
			: : "m" (gdtDescriptor));

		TRACE(("gdt at virtual address %p\n", (void *)gKernelArgs.arch_args.vir_gdt));
	}

	// save the memory we've physically allocated
	gKernelArgs.physical_allocated_range[0].size
		= sNextPhysicalAddress - gKernelArgs.physical_allocated_range[0].start;

	// save the memory we've virtually allocated (for the kernel and other stuff)
	gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
	gKernelArgs.virtual_allocated_range[0].size = sNextVirtualAddress - KERNEL_BASE;
	gKernelArgs.num_virtual_allocated_ranges = 1;

	// sort the address ranges
	sort_addr_range(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);
	sort_addr_range(gKernelArgs.physical_allocated_range,
		gKernelArgs.num_physical_allocated_ranges);
	sort_addr_range(gKernelArgs.virtual_allocated_range,
		gKernelArgs.num_virtual_allocated_ranges);

#ifdef TRACE_MMU
	{
		uint32 i;

		dprintf("phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_memory_range[i].start,
				gKernelArgs.physical_memory_range[i].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.physical_allocated_range[i].start,
				gKernelArgs.physical_allocated_range[i].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
			dprintf("    base 0x%08lx, length 0x%08lx\n",
				gKernelArgs.virtual_allocated_range[i].start,
				gKernelArgs.virtual_allocated_range[i].size);
		}
	}
#endif
}
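
/*	For reference, the selector values mentioned in the comments above follow
 *	directly from the GDT slots: a selector is (index << 3) | RPL, so slot 1
 *	gives 0x08, slot 2 gives 0x10, and the user segments in slots 3 and 4
 *	carry RPL 3, giving 0x1b and 0x23.
 */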


extern "C" void
mmu_init(void)
{
	TRACE(("mmu_init\n"));

	gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
	gKernelArgs.physical_allocated_range[0].size = 0;
	gKernelArgs.num_physical_allocated_ranges = 1;
		// remember the start of the allocated physical pages

	init_page_directory();

	// Map the page directory into kernel space at 0xffc00000-0xffffffff;
	// this enables an MMU trick where the 4 MB region that this pgdir entry
	// represents now maps the 4 MB of potential page tables that the pgdir
	// points to. Thrown away later in VM bringup, but useful for now.
	sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;
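	// With that entry in place, the page table responsible for a virtual
	// address V shows up at 0xffc00000 + (V / (4 * 1024 * 1024)) * B_PAGE_SIZE,
	// and the page directory itself is visible at 0xfffff000.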

	// also map it on the next vpage
	gKernelArgs.arch_args.vir_pgdir = get_next_virtual_page();
	map_page(gKernelArgs.arch_args.vir_pgdir, (uint32)sPageDirectory, kDefaultPageFlags);

	// map in a kernel stack
	gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL, KERNEL_STACK_SIZE);
	gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE;

	TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
		gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));

	extended_memory *extMemoryBlock;
	uint32 extMemoryCount = get_memory_map(&extMemoryBlock);

	// figure out the memory map
	if (extMemoryCount > 0) {
		gKernelArgs.num_physical_memory_ranges = 0;

		for (uint32 i = 0; i < extMemoryCount; i++) {
			// Type 1 is available memory
			if (extMemoryBlock[i].type == 1) {
				// round everything up to page boundaries, exclusive of pages
				// it partially occupies
				extMemoryBlock[i].length -= (extMemoryBlock[i].base_addr % B_PAGE_SIZE)
					? (B_PAGE_SIZE - (extMemoryBlock[i].base_addr % B_PAGE_SIZE)) : 0;
				extMemoryBlock[i].base_addr = ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
				extMemoryBlock[i].length = ROUNDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);

				// we ignore all memory beyond 4 GB
				if (extMemoryBlock[i].base_addr > 0xffffffffULL)
					continue;
				if (extMemoryBlock[i].base_addr + extMemoryBlock[i].length > 0xffffffffULL)
					extMemoryBlock[i].length = 0x100000000ULL - extMemoryBlock[i].base_addr;

				if (gKernelArgs.num_physical_memory_ranges > 0) {
					// we might want to extend a previous hole
					addr_t previousEnd = gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].start
						+ gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size;
					addr_t holeSize = extMemoryBlock[i].base_addr - previousEnd;

					// if the hole is smaller than 1 MB, we try to mark the memory
					// as allocated and extend the previous memory range
					if (previousEnd <= extMemoryBlock[i].base_addr
						&& holeSize < 0x100000
						&& insert_physical_allocated_range(previousEnd,
							extMemoryBlock[i].base_addr - previousEnd) == B_OK) {
						gKernelArgs.physical_memory_range[
							gKernelArgs.num_physical_memory_ranges - 1].size += holeSize;
					}
				}

				insert_physical_memory_range(extMemoryBlock[i].base_addr,
					extMemoryBlock[i].length);
			}
		}
	} else {
		// ToDo: for now!
		dprintf("No extended memory block - using 32 MB (fix me!)\n");
		uint32 memSize = 32 * 1024 * 1024;

		// we don't have an extended map, assume memory is contiguously mapped at 0x0
		gKernelArgs.physical_memory_range[0].start = 0;
		gKernelArgs.physical_memory_range[0].size = memSize;
		gKernelArgs.num_physical_memory_ranges = 1;

		// mark the bios area allocated
		gKernelArgs.physical_allocated_range[gKernelArgs.num_physical_allocated_ranges].start = 0x9f000; // 640k - 1 page
		gKernelArgs.physical_allocated_range[gKernelArgs.num_physical_allocated_ranges].size = 0x61000;
		gKernelArgs.num_physical_allocated_ranges++;
	}

	gKernelArgs.arch_args.page_hole = 0xffc00000;
}


//	#pragma mark -


extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection,
	bool /*exactAddress*/)
{
	void *address = mmu_allocate(*_address, size);
	if (address == NULL)
		return B_NO_MEMORY;

	*_address = address;
	return B_OK;
}


extern "C" status_t
platform_free_region(void *address, size_t size)
{
	mmu_free(address, size);
	return B_OK;
}


void
platform_release_heap(struct stage2_args *args, void *base)
{
	// It will be freed automatically, since it is in the
	// identity mapped region, and not stored in the kernel's
	// page tables.
}


status_t
platform_init_heap(struct stage2_args *args, void **_base, void **_top)
{
	void *heap = (void *)get_next_physical_address(args->heap_size);
	if (heap == NULL)
		return B_NO_MEMORY;

	*_base = heap;
	*_top = (void *)((int8 *)heap + args->heap_size);
	return B_OK;
}
690