/*
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk
 * Distributed under the terms of the MIT License.
 */

#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <arch/vm.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


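// Early architecture-specific VM initialization. The SPARC port has nothing
// to set up at this stage yet, so this is a no-op stub.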
status_t
arch_vm_init(kernel_args *args)
{
	return B_OK;
}


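// Called after the area subsystem has been brought up. Again, nothing to do
// for SPARC so far.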
status_t
arch_vm_init_post_area(kernel_args *args)
{
	return B_OK;
}


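// Called once the module system is available. Still a no-op on SPARC.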
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}


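// Final VM initialization step. Currently this only traces the virtual
// ranges the boot loader asked the kernel to keep; the code that would turn
// those ranges into proper areas is still disabled below.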
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (uint32 i = 0; i < args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: 0x%" B_PRIx64 "\n", (void*)range.start,
			range.size));

#if 0
		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
#endif
	}

#if 0
	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (if it is the team's main thread, it will delete the team).
	// It is, however, not necessary to change the page directory: userland
	// teams' page directories include all kernel mappings as well.
	// Furthermore, our arch-specific translation map data objects are
	// ref-counted, so they won't go away as long as they are still used on
	// any CPU.
}


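// Report whether the given protection flags can be enforced. The SPARC port
// currently claims support for all of them.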
bool
arch_vm_supports_protection(uint32 protection)
{
	return true;
}


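// No per-area memory type bookkeeping is done on SPARC, so there is nothing
// to clear here.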
void
arch_vm_unset_memory_type(VMArea *area)
{
}


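// Only the default memory type (0) is accepted; requests for any other type
// fail with B_ERROR, and *effectiveType is left untouched.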
status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type,
	uint32 *effectiveType)
{
	if (type == 0)
		return B_OK;

	return B_ERROR;
}