/*
 * Copyright 2002-2010, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef KERNEL_VM_VM_TRANSLATION_MAP_H
#define KERNEL_VM_VM_TRANSLATION_MAP_H


#include <kernel.h>
#include <lock.h>

#include <vm/VMArea.h>


struct kernel_args;
struct vm_page_reservation;


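// VMTranslationMap is the architecture-independent interface to an address
// space's hardware translation map (page tables). The concrete implementations
// come from the architecture specific backends declared in
// <arch/vm_translation_map.h>, which is included at the end of this header.
// Most operations expect the caller to hold the map's lock via Lock()/Unlock()
// (the methods marked "map not locked" below are the exception), and
// implementations may defer hardware updates such as TLB invalidation until
// Flush() is called or the map is unlocked.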
struct VMTranslationMap {
			struct ReverseMappingInfoCallback;

public:
								VMTranslationMap();
	virtual						~VMTranslationMap();

	virtual	bool				Lock() = 0;
	virtual	void				Unlock() = 0;

	virtual	addr_t				MappedSize() const = 0;
	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const = 0;

	virtual	status_t			Map(addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									vm_page_reservation* reservation) = 0;
	virtual	status_t			Unmap(addr_t start, addr_t end) = 0;

	virtual	status_t			DebugMarkRangePresent(addr_t start, addr_t end,
									bool markPresent);

	// map not locked
	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue) = 0;
	virtual	void				UnmapPages(VMArea* area, addr_t base,
									size_t size, bool updatePageQueue);
	virtual	void				UnmapArea(VMArea* area,
									bool deletingAddressSpace,
									bool ignoreTopCachePageFlags);

	virtual	status_t			Query(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags) = 0;
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags) = 0;

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType) = 0;
			status_t			ProtectPage(VMArea* area, addr_t address,
									uint32 attributes);
			status_t			ProtectArea(VMArea* area,
									uint32 attributes);

	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags) = 0;

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified) = 0;

	virtual	void				Flush() = 0;

	// backends for KDL commands
	virtual	void				DebugPrintMappingInfo(addr_t virtualAddress);
	virtual	bool				DebugGetReverseMappingInfo(
									phys_addr_t physicalAddress,
									ReverseMappingInfoCallback& callback);

protected:
			void				PageUnmapped(VMArea* area,
									page_num_t pageNumber, bool accessed,
									bool modified, bool updatePageQueue);
			void				UnaccessedPageUnmapped(VMArea* area,
									page_num_t pageNumber);

protected:
			recursive_lock		fLock;
			int32				fMapCount;
};
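
// A typical single-page mapping sequence looks roughly like the sketch below.
// It is illustrative only and not part of this interface; it assumes the
// kernel's vm_page reservation API (vm_page_reserve_pages()/
// vm_page_unreserve_pages()), uses the default memory type (0), and omits
// error checking:
//
//	addr_t end = virtualAddress + B_PAGE_SIZE - 1;
//	vm_page_reservation reservation;
//	vm_page_reserve_pages(&reservation,
//		map->MaxPagesNeededToMap(virtualAddress, end), VM_PRIORITY_SYSTEM);
//
//	map->Lock();
//	map->Map(virtualAddress, physicalAddress,
//		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &reservation);
//	map->Unlock();
//
//	vm_page_unreserve_pages(&reservation);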


struct VMTranslationMap::ReverseMappingInfoCallback {
	virtual						~ReverseMappingInfoCallback();

	virtual	bool				HandleVirtualAddress(addr_t virtualAddress) = 0;
};
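
// An illustrative ReverseMappingInfoCallback implementation -- not part of
// this header -- as a KDL command backend might use it to list the virtual
// addresses mapping a physical page. The meaning of the return value depends
// on the architecture implementation; here false is assumed to mean
// "continue iterating":
//
//	struct PrintAddressCallback
//			: VMTranslationMap::ReverseMappingInfoCallback {
//		virtual bool HandleVirtualAddress(addr_t virtualAddress)
//		{
//			kprintf("  %#" B_PRIxADDR "\n", virtualAddress);
//			return false;
//		}
//	};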


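// VMPhysicalPageMapper provides access to arbitrary physical memory from
// kernel code: GetPage()/PutPage() and their variants temporarily map a
// physical page into the kernel address space, while the Memset*/Memcpy*
// methods perform memory operations given physical addresses.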
struct VMPhysicalPageMapper {
								VMPhysicalPageMapper();
	virtual						~VMPhysicalPageMapper();

	// get/put virtual address for physical page -- will be usable on all CPUs
	// (usually more expensive than the *CurrentCPU() versions)
	virtual	status_t			GetPage(phys_addr_t physicalAddress,
									addr_t* _virtualAddress,
									void** _handle) = 0;
	virtual	status_t			PutPage(addr_t virtualAddress,
									void* handle) = 0;

	// get/put virtual address for physical page -- thread must be pinned the
	// whole time
	virtual	status_t			GetPageCurrentCPU(
									phys_addr_t physicalAddress,
									addr_t* _virtualAddress,
									void** _handle) = 0;
	virtual	status_t			PutPageCurrentCPU(addr_t virtualAddress,
									void* _handle) = 0;

	// get/put virtual address for physical page in KDL
	virtual	status_t			GetPageDebug(phys_addr_t physicalAddress,
									addr_t* _virtualAddress,
									void** _handle) = 0;
	virtual	status_t			PutPageDebug(addr_t virtualAddress,
									void* handle) = 0;

	// memory operations on pages
	virtual	status_t			MemsetPhysical(phys_addr_t address, int value,
									phys_size_t length) = 0;
	virtual	status_t			MemcpyFromPhysical(void* to, phys_addr_t from,
									size_t length, bool user) = 0;
	virtual	status_t			MemcpyToPhysical(phys_addr_t to,
									const void* from, size_t length,
									bool user) = 0;
	virtual	void				MemcpyPhysicalPage(phys_addr_t to,
									phys_addr_t from) = 0;
};
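
// Illustrative use of the temporary mapping API (sketch only, error handling
// reduced to the essentials); "mapper" stands for the system's
// VMPhysicalPageMapper instance:
//
//	addr_t virtualAddress;
//	void* handle;
//	if (mapper->GetPage(physicalAddress, &virtualAddress, &handle) == B_OK) {
//		// ... access the page through virtualAddress ...
//		mapper->PutPage(virtualAddress, handle);
//	}
//
// For the *CurrentCPU() variants the calling thread additionally has to stay
// pinned to its CPU between GetPageCurrentCPU() and PutPageCurrentCPU().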


inline status_t
VMTranslationMap::ProtectPage(VMArea* area, addr_t address, uint32 attributes)
{
	return Protect(address, address + B_PAGE_SIZE - 1, attributes,
		area->MemoryType());
}


inline status_t
VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
	return Protect(area->Base(), area->Base() + area->Size() - 1, attributes,
		area->MemoryType());
}


#include <arch/vm_translation_map.h>

#endif	/* KERNEL_VM_VM_TRANSLATION_MAP_H */