/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_TYPES_H
#define _KERNEL_VM_VM_TYPES_H


#include <new>

#include <AllocationTracking.h>
#include <arch/vm_types.h>
#include <condition_variable.h>
#include <kernel.h>
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/DoublyLinkedQueue.h>
#include <util/SplayTree.h>

#include <sys/uio.h>

#include "kernel_debug_config.h"

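// Page allocation tracking is only usable when both the feature flag and the
// tracing infrastructure it depends on are enabled (see
// kernel_debug_config.h); this macro folds those conditions into one test.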
#define VM_PAGE_ALLOCATION_TRACKING_AVAILABLE \
	(VM_PAGE_ALLOCATION_TRACKING && PAGE_ALLOCATION_TRACING != 0 \
		&& PAGE_ALLOCATION_TRACING_STACK_TRACE > 0)


class AsyncIOCallback;
struct vm_page_mapping;
struct VMCache;
struct VMCacheRef;
typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;


struct virtual_address_restrictions {
	void*	address;
				// base or exact address, depending on address_specification
	uint32	address_specification;
				// address specification as passed to create_area()
	size_t	alignment;
				// address alignment; overridden when
				// address_specification == B_ANY_KERNEL_BLOCK_ADDRESS
};

struct physical_address_restrictions {
	phys_addr_t	low_address;
					// lowest acceptable address
	phys_addr_t	high_address;
					// lowest no longer acceptable address; for ranges: the
					// highest acceptable non-inclusive end address
	phys_size_t	alignment;
					// address alignment
	phys_size_t	boundary;
					// multiples of which may not be crossed by the address
					// range
};
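
// For illustration only (a hypothetical caller, not part of this header): a
// request for a kernel area whose backing pages must lie below 16 MB and be
// page-aligned might fill the restriction structs roughly like this:
//
//	virtual_address_restrictions virtualRestrictions = {};
//	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
//	physical_address_restrictions physicalRestrictions = {};
//	physicalRestrictions.high_address = 16 * 1024 * 1024;
//	physicalRestrictions.alignment = B_PAGE_SIZE;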


typedef struct vm_page_mapping {
	vm_page_mapping_link page_link;
	vm_page_mapping_link area_link;
	struct vm_page *page;
	struct VMArea *area;
} vm_page_mapping;

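// Each vm_page_mapping sits on two lists at once: the list of mappings of a
// page and the list of mappings of an area. The following classes tell
// DoublyLinkedQueue which of the two embedded links to use for each view.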
class DoublyLinkedPageLink {
public:
	inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
	{
		return &element->page_link;
	}

	inline const vm_page_mapping_link *operator()(
		const vm_page_mapping *element) const
	{
		return &element->page_link;
	}
};

class DoublyLinkedAreaLink {
public:
	inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
	{
		return &element->area_link;
	}

	inline const vm_page_mapping_link *operator()(
		const vm_page_mapping *element) const
	{
		return &element->area_link;
	}
};

typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink>
	vm_page_mappings;
typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink>
	VMAreaMappings;
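
// A minimal sketch (assuming the usual DoublyLinkedList/DoublyLinkedQueue
// iterator API) of walking all mappings of a page:
//
//	vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
//	while (vm_page_mapping* mapping = iterator.Next()) {
//		// mapping->area is a VMArea this page is mapped into
//	}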

typedef phys_addr_t page_num_t;


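// Indirection between a vm_page and its VMCache: pages point at a VMCacheRef
// rather than at the cache directly, with ref_count tracking the outstanding
// references.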
struct VMCacheRef {
	VMCache*	cache;
	int32		ref_count;

	VMCacheRef(VMCache* cache);
};


struct vm_page {
	DoublyLinkedListLink<vm_page> queue_link;

	page_num_t physical_page_number;

private:
	VMCacheRef* cache_ref;
public:
	page_num_t cache_offset;
		// in page size units

	SplayTreeLink<vm_page> cache_link;
	vm_page* cache_next;

	vm_page_mappings mappings;

#if DEBUG_PAGE_QUEUE
	void* queue;
#endif

#if DEBUG_PAGE_ACCESS
	int32 accessing_thread;
#endif

#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
	AllocationTrackingInfo allocation_tracking_info;
#endif

private:
	uint8 state : 3;
public:
	bool busy : 1;
	bool busy_writing : 1;
		// used in VMAnonymousCache::Merge()
	bool accessed : 1;
	bool modified : 1;
	uint8 _unused : 1;

	uint8 usage_count;

	inline void Init(page_num_t pageNumber);

	VMCacheRef* CacheRef() const { return cache_ref; }
	void SetCacheRef(VMCacheRef* cacheRef) { this->cache_ref = cacheRef; }

	VMCache* Cache() const
		{ return cache_ref != NULL ? cache_ref->cache : NULL; }

	bool IsMapped() const
		{ return fWiredCount > 0 || !mappings.IsEmpty(); }

	uint8 State() const { return state; }
	void InitState(uint8 newState);
	void SetState(uint8 newState);

	inline uint16 WiredCount() const { return fWiredCount; }
	inline void IncrementWiredCount();
	inline void DecrementWiredCount();
		// both implemented in VMCache.h to avoid inclusion here

private:
	uint16 fWiredCount;
};


enum {
	PAGE_STATE_ACTIVE = 0,
	PAGE_STATE_INACTIVE,
	PAGE_STATE_MODIFIED,
	PAGE_STATE_CACHED,
	PAGE_STATE_FREE,
	PAGE_STATE_CLEAR,
	PAGE_STATE_WIRED,
	PAGE_STATE_UNUSED,

	PAGE_STATE_COUNT,

	PAGE_STATE_FIRST_UNQUEUED = PAGE_STATE_WIRED
};
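
// As the name PAGE_STATE_FIRST_UNQUEUED suggests, states before it each
// correspond to one of the global paging queues; pages in the states from
// PAGE_STATE_WIRED on are not kept on any of those queues.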


#define VM_PAGE_ALLOC_STATE	0x00000007
#define VM_PAGE_ALLOC_CLEAR	0x00000010
#define VM_PAGE_ALLOC_BUSY	0x00000020
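
// VM_PAGE_ALLOC_STATE masks the initial page state out of the allocation
// flags; VM_PAGE_ALLOC_CLEAR requests zeroed memory and VM_PAGE_ALLOC_BUSY
// marks the page busy right away. E.g. a caller could combine them as
// PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR when allocating a page.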


inline void
vm_page::Init(page_num_t pageNumber)
{
	physical_page_number = pageNumber;
	new(&mappings) vm_page_mappings();
	SetCacheRef(NULL);

	InitState(PAGE_STATE_FREE);
	busy = busy_writing = false;
	accessed = modified = false;
	_unused = 0;
	usage_count = 0;

	fWiredCount = 0;

#if DEBUG_PAGE_QUEUE
	queue = NULL;
#endif
#if DEBUG_PAGE_ACCESS
	accessing_thread = -1;
#endif
}


#if DEBUG_PAGE_ACCESS
#	include <thread.h>

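// With DEBUG_PAGE_ACCESS enabled, accessing_thread records which thread is
// currently working on the page. The helpers below claim and release that
// slot via atomic_test_and_set() and panic() on any mismatch, i.e. on
// concurrent or unbalanced page access.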
static inline void
vm_page_debug_access_start(vm_page* page)
{
	thread_id threadID = thread_get_current_thread_id();
	thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
		threadID, -1);
	if (previousThread != -1) {
		panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR
			" (start), currently accessed by: %" B_PRId32 "@! page -m %p; "
			"sc %" B_PRId32 "; cache _cache",
			page->physical_page_number * B_PAGE_SIZE, previousThread, page,
			previousThread);
	}
}


static inline void
vm_page_debug_access_end(vm_page* page)
{
	thread_id threadID = thread_get_current_thread_id();
	thread_id previousThread = atomic_test_and_set(&page->accessing_thread, -1,
		threadID);
	if (previousThread != threadID) {
		panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR
			" (end) by current thread, current accessor is: %" B_PRId32
			"@! page -m %p; sc %" B_PRId32 "; cache _cache",
			page->physical_page_number * B_PAGE_SIZE, previousThread, page,
			previousThread);
	}
}


static inline void
vm_page_debug_access_check(vm_page* page)
{
	thread_id thread = page->accessing_thread;
	if (thread != thread_get_current_thread_id()) {
		panic("Invalid concurrent access to page 0x%" B_PRIXPHYSADDR
			" (check), currently accessed by: %" B_PRId32 "@! page -m %p; "
			"sc %" B_PRId32 "; cache _cache",
			page->physical_page_number * B_PAGE_SIZE, thread, page, thread);
	}
}


static inline void
vm_page_debug_access_transfer(vm_page* page, thread_id expectedPreviousThread)
{
	thread_id threadID = thread_get_current_thread_id();
	thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
		threadID, expectedPreviousThread);
	if (previousThread != expectedPreviousThread) {
		panic("Invalid access transfer for page %p, currently accessed by: "
			"%" B_PRId32 ", expected: %" B_PRId32, page, previousThread,
			expectedPreviousThread);
	}
}

#	define DEBUG_PAGE_ACCESS_START(page)	vm_page_debug_access_start(page)
#	define DEBUG_PAGE_ACCESS_END(page)		vm_page_debug_access_end(page)
#	define DEBUG_PAGE_ACCESS_CHECK(page)	vm_page_debug_access_check(page)
#	define DEBUG_PAGE_ACCESS_TRANSFER(page, thread)	\
		vm_page_debug_access_transfer(page, thread)
#else
#	define DEBUG_PAGE_ACCESS_START(page)			do {} while (false)
#	define DEBUG_PAGE_ACCESS_END(page)				do {} while (false)
#	define DEBUG_PAGE_ACCESS_CHECK(page)			do {} while (false)
#	define DEBUG_PAGE_ACCESS_TRANSFER(page, thread)	do {} while (false)
#endif
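
// Typical (illustrative) usage: bracket any modification of a page with
//
//	DEBUG_PAGE_ACCESS_START(page);
//	// ... modify the page ...
//	DEBUG_PAGE_ACCESS_END(page);
//
// In non-debug builds the macros expand to no-ops.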


#endif	// _KERNEL_VM_VM_TYPES_H