/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <util/list.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;


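// Types of backing store a cache can represent: anonymous (RAM/swap) memory,
// a file (vnode), a physical device range, or no backing store at all.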
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

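// Events on a page that threads can wait for via WaitForPageEvents().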
enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page is no longer busy
};


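// Splay tree definition for the pages of a cache, keyed by the page's
// cache_offset (in page size units).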
struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;


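// A VMCache caches the pages belonging to one backing store (anonymous
// memory, a vnode, ...). It can serve multiple areas as well as consumer
// caches stacked on top of it. Caches are reference counted, and all
// non-trivial operations require the cache to be locked.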
struct VMCache {
public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType, uint32 allocationFlags);

	virtual	void				Delete();

	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);

	inline	VMCacheRef*			CacheRef() const	{ return fCacheRef; }

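			// Page event waiting: threads can block until one of the given
			// events (currently only PAGE_EVENT_NOT_BUSY) occurs on a page.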
			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
	inline	void				MarkPageUnbusy(vm_page* page);

			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);

	inline	page_num_t			WiredPagesCount() const;
	inline	void				IncrementWiredPagesCount();
	inline	void				DecrementWiredPagesCount();

			void				AddConsumer(VMCache* consumer);

			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
	virtual	status_t			Resize(off_t newSize, int priority);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const generic_io_vec* vecs,
									size_t count, uint32 flags,
									generic_size_t* _numBytes);
	virtual	status_t			Write(off_t offset, const generic_io_vec* vecs,
									size_t count, uint32 flags,
									generic_size_t* _numBytes);
	virtual	status_t			WriteAsync(off_t offset,
									const generic_io_vec* vecs, size_t count,
									generic_size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction

	virtual	status_t			Fault(struct VMAddressSpace* aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

	virtual	bool				DebugHasPage(off_t offset);
			vm_page*			DebugLookupPage(off_t offset);

	virtual	void				Dump(bool showPages) const;

public:
			VMArea*				areas;
			list_link			consumer_link;
			list				consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				type : 6;

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer(bool consumerLocked);
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;
			void*				fUserData;
			VMCacheRef*			fCacheRef;
			page_num_t			fWiredPagesCount;
};


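// With DEBUG_CACHE_LIST enabled, all caches are additionally linked into a
// global list that can be inspected from the kernel debugger.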
#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


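// Factory for the concrete cache types. On success the caller receives one
// reference to the newly created cache.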
class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable,
								int priority);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(int priority, VMCache*& cache);
};
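

// A minimal usage sketch (illustrative only, assuming a kernel context with
// an initialized VM; VM_PRIORITY_SYSTEM is the allocation priority constant
// from the private VM headers):
//
//	VMCache* cache;
//	status_t error = VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0,
//		false, VM_PRIORITY_SYSTEM);
//	if (error == B_OK) {
//		cache->Lock();
//		vm_page* page = cache->LookupPage(0);
//			// byte offset; returns NULL if no page is cached there
//		cache->ReleaseRefAndUnlock();
//	}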


bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}


void
VMCache::MarkPageUnbusy(vm_page* page)
{
	ASSERT(page->busy);
	page->busy = false;
	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}


page_num_t
VMCache::WiredPagesCount() const
{
	return fWiredPagesCount;
}


void
VMCache::IncrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount < page_count);

	fWiredPagesCount++;
}


void
VMCache::DecrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount > 0);

	fWiredPagesCount--;
}


// vm_page methods implemented here to avoid including VMCache.h in vm_types.h

inline void
vm_page::IncrementWiredCount()
{
	if (fWiredCount++ == 0)
		cache_ref->cache->IncrementWiredPagesCount();
}


inline void
vm_page::DecrementWiredCount()
{
	if (--fWiredCount == 0)
		cache_ref->cache->DecrementWiredPagesCount();
}


#ifdef __cplusplus
extern "C" {
#endif

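// C interface: cache system initialization and a helper to get a page's
// cache, locked and with a reference acquired.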
status_t vm_cache_init(struct kernel_args* args);
void vm_cache_init_post_heap();
struct VMCache* vm_cache_acquire_locked_page_cache(struct vm_page* page,
	bool dontWait);

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */