/*
 * Copyright 2019-2022 Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#include <KernelExport.h>

#include <boot/kernel_args.h>
#include <kernel.h>

#include <arch/vm.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/vm_types.h>

#include "VMSAv8TranslationMap.h"

//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


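// The staged init hooks below are currently no-ops on ARM64: the boot
// loader hands over working translation tables, and all further page table
// management happens in VMSAv8TranslationMap.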
status_t
arch_vm_init(kernel_args* args)
{
	TRACE("arch_vm_init\n");
	return B_OK;
}


status_t
arch_vm_init2(kernel_args* args)
{
	TRACE("arch_vm_init2\n");
	return B_OK;
}


status_t
arch_vm_init_post_area(kernel_args* args)
{
	TRACE("arch_vm_init_post_area\n");
	return B_OK;
}


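// Turn the virtual ranges the boot loader asked us to preserve into proper
// kernel areas, so the VM will neither unmap them nor hand their address
// ranges out to anyone else.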
status_t
arch_vm_init_end(kernel_args* args)
{
	TRACE("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep);

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range& range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE("  start: %p, size: %#" B_PRIxSIZE "\n", (void*)range.start,
			range.size);

		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE("    not a kernel address, skipping...\n");
			continue;
		}

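		// The range is already mapped; look up the physical address backing
		// it so the area can be created on top of the existing mapping.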
		phys_addr_t physicalAddress;
		void* address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);

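		// Register the mapping as an area. The trailing "true" (alreadyWired)
		// tells the VM the pages were already mapped and wired by the boot
		// loader.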
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);

		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
	}

	return B_OK;
}


status_t
arch_vm_init_post_modules(kernel_args* args)
{
	TRACE("arch_vm_init_post_modules\n");
	return B_OK;
}


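// Called when switching between address spaces: point the user half of the
// translation regime (TTBR0_EL1) at the new address space's map.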
void
arch_vm_aspace_swap(struct VMAddressSpace* from, struct VMAddressSpace* to)
{
	VMSAv8TranslationMap* fromMap = (VMSAv8TranslationMap*)from->TranslationMap();
	VMSAv8TranslationMap* toMap = (VMSAv8TranslationMap*)to->TranslationMap();
	if (fromMap != toMap)
		VMSAv8TranslationMap::SwitchUserMap(fromMap, toMap);
}


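// The VMSAv8 access permission (AP) bits cannot encode every combination
// the generic VM layer allows, so reject the ones without a valid encoding.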
bool
arch_vm_supports_protection(uint32 protection)
{
	// User-RO/Kernel-RW is not possible
	if ((protection & B_READ_AREA) != 0 && (protection & B_WRITE_AREA) == 0
		&& (protection & B_KERNEL_WRITE_AREA) != 0) {
		return false;
	}

	// User-Execute implies User-Read, because allowing execute-only user
	// mappings would break PAN
	if ((protection & B_EXECUTE_AREA) != 0
		&& (protection & B_READ_AREA) == 0) {
		return false;
	}

	return true;
}


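// On ARM64 the memory type (normal vs. device, cacheability) is carried by
// the MAIR attribute index stored in each page table entry, so there is no
// separate per-area state to manage here.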
void
arch_vm_unset_memory_type(VMArea* area)
{
}


status_t
arch_vm_set_memory_type(VMArea* area, phys_addr_t physicalBase, uint32 type,
	uint32* effectiveType)
{
	// The memory type is set in the page tables as pages are mapped;
	// nothing more to do here.
	return B_OK;
}