/*
 * Copyright 2007, François Revol, revol@free.fr.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <KernelExport.h>

#include <kernel.h>
#include <boot/kernel_args.h>

#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm_types.h>
#include <arch/vm.h>
//#include <arch_mmu.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


status_t
arch_vm_init(kernel_args *args)
{
	TRACE(("arch_vm_init: entry\n"));
	return B_OK;
}


status_t
arch_vm_init2(kernel_args *args)
{
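	// Nothing to do at this stage on ARM.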
	return B_OK;
}


status_t
arch_vm_init_post_area(kernel_args *args)
{
	TRACE(("arch_vm_init_post_area: entry\n"));
	return B_OK;
}


status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: %#" B_PRIxSIZE "\n", (void*)range.start,
			range.size));

		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    not a kernel address, skipping...\n"));
			continue;
		}

		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
			&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);

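		// Recreate the range as a proper kernel area on top of the mapping
		// the boot loader already set up (hence the trailing "already wired"
		// argument is true).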
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);

		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
	}

	// Throw away all mappings that are unused by the kernel
	vm_free_unused_boot_loader_range(KERNEL_LOAD_BASE, KERNEL_SIZE);

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args *args)
{
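	// Nothing ARM-specific to do after module initialization.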
	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (if it is the team's main thread, it will delete the team).
	// It is, however, not necessary to change the page directory: userland
	// teams' page directories include all kernel mappings as well.
	// Furthermore, our arch-specific translation map data objects are
	// ref-counted, so they won't go away as long as they are still in use on
	// any CPU.
}


bool
arch_vm_supports_protection(uint32 protection)
{
	// TODO: check which protection combinations the ARM MMU can actually
	// express.
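	// Until that is done, optimistically report every combination as
	// supported.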
	return true;
}


void
arch_vm_unset_memory_type(VMArea *area)
{
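	// Memory types are not yet tracked on ARM, so there is nothing to undo
	// here.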
}


status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type)
{
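	// Setting memory types (e.g. write-combining) is not implemented on ARM
	// yet; report success so generic code can proceed regardless.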
	return B_OK;
}
139