/*
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2019, Adrien Destugues, pulkomandy@pulkomandy.tk
 * Distributed under the terms of the MIT License.
 */

#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <arch/vm.h>


//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
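// TRACE() expects its argument list wrapped in an extra set of parentheses,
// as in TRACE(("start: %p\n", address)): the whole parenthesized list passes
// through the single macro parameter and expands to a dprintf call.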


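// Early architecture-specific VM initialization, run before the VM is fully
// up. Nothing is needed here on SPARC so far.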
status_t
arch_vm_init(kernel_args *args)
{
	return B_OK;
}


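// Called once the kernel's area system is up, giving the architecture a
// chance to create areas of its own. Nothing is needed on SPARC so far.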
status_t
arch_vm_init_post_area(kernel_args *args)
{
	return B_OK;
}


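// Called once the module system is available. Nothing to do on SPARC so far.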
status_t
arch_vm_init_post_modules(kernel_args *args)
{
	return B_OK;
}


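// Final stage of VM initialization. This is where virtual ranges inherited
// from the boot loader would be turned into proper areas; see the disabled
// code below.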
status_t
arch_vm_init_end(kernel_args *args)
{
	TRACE(("arch_vm_init_end(): %" B_PRIu32 " virtual ranges to keep:\n",
		args->arch_args.num_virtual_ranges_to_keep));

	for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
		addr_range &range = args->arch_args.virtual_ranges_to_keep[i];

		TRACE(("  start: %p, size: 0x%lx\n", (void*)range.start, range.size));

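		// The disabled block below shows what keeping such a range would look
		// like: look up the physical memory behind it and wrap it in a
		// "boot loader reserved area". It presumably stays off until the
		// SPARC paging code can support it.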
#if 0
		// skip ranges outside the kernel address space
		if (!IS_KERNEL_ADDRESS(range.start)) {
			TRACE(("    no kernel address, skipping...\n"));
			continue;
		}

		phys_addr_t physicalAddress;
		void *address = (void*)range.start;
		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
				&physicalAddress) != B_OK)
			panic("arch_vm_init_end(): No page mapping for %p\n", address);
		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
			"boot loader reserved area", &address,
			B_EXACT_ADDRESS, range.size,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			physicalAddress, true);
		if (area < 0) {
			panic("arch_vm_init_end(): Failed to create area for boot loader "
				"reserved area: %p - %p\n", (void*)range.start,
				(void*)(range.start + range.size));
		}
#endif
	}

#if 0
	// Throw away any address space mappings we've inherited from the boot
	// loader and have not yet turned into an area.
	vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
#endif

	return B_OK;
}


void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
	// This function is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
	// necessary (in case it is the team's main thread, it will delete the
	// team).
	// It is, however, not necessary to change the page directory. Userland
	// teams' page directories include all kernel mappings as well.
	// Furthermore, our arch-specific translation map data objects are
	// ref-counted, so they won't go away as long as they are still used on
	// any CPU.
}


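// The SPARC port does not restrict protection combinations: any requested
// protection flags are reported as supported.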
bool
arch_vm_supports_protection(uint32 protection)
{
	return true;
}


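// Since arch_vm_set_memory_type() below never sets a memory type, there is
// nothing to undo here.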
void
arch_vm_unset_memory_type(VMArea *area)
{
}


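// Special memory types (e.g. write-combining mappings) are not implemented
// on SPARC yet: only the default type (0) is accepted, everything else fails.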
status_t
arch_vm_set_memory_type(VMArea *area, phys_addr_t physicalBase, uint32 type,
	uint32 *effectiveType)
{
	if (type == 0)
		return B_OK;

	return B_ERROR;
}