xref: /haiku/headers/private/kernel/vm/VMCache.h (revision 45bd7bb3db9d9e4dcb02b89a3e7c2bf382c0a88c)
1 /*
2  * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 #ifndef _KERNEL_VM_VM_CACHE_H
10 #define _KERNEL_VM_VM_CACHE_H
11 
12 
13 #include <debug.h>
14 #include <kernel.h>
15 #include <vm/vm.h>
16 #include <vm/vm_types.h>
17 
18 #include "kernel_debug_config.h"
19 
20 
21 struct kernel_args;
22 
23 
// Cache types, passed to VMCache::Init(); each corresponds to one of the
// VMCacheFactory::Create*() methods below.
enum {
	CACHE_TYPE_RAM = 0,	// anonymous memory (cf. CreateAnonymousCache())
	CACHE_TYPE_VNODE,	// file backed (cf. CreateVnodeCache())
	CACHE_TYPE_DEVICE,	// device memory at a base address (cf. CreateDeviceCache())
	CACHE_TYPE_NULL		// cf. CreateNullCache()
};
30 
// Page event bits used with WaitForPageEvents()/NotifyPageEvents().
enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};
34 
35 
36 struct VMCachePagesTreeDefinition {
37 	typedef page_num_t KeyType;
38 	typedef	vm_page NodeType;
39 
40 	static page_num_t GetKey(const NodeType* node)
41 	{
42 		return node->cache_offset;
43 	}
44 
45 	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
46 	{
47 		return &node->cache_link;
48 	}
49 
50 	static int Compare(page_num_t key, const NodeType* node)
51 	{
52 		return key == node->cache_offset ? 0
53 			: (key < node->cache_offset ? -1 : 1);
54 	}
55 
56 	static NodeType** GetListLink(NodeType* node)
57 	{
58 		return &node->cache_next;
59 	}
60 };
61 
62 typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
63 
64 
// A VMCache manages the vm_pages backing one or more VMAreas and defines
// the backing store interface (Commit(), Read(), Write(), Fault(), ...)
// implemented by its subclasses (cf. the CACHE_TYPE_* constants and
// VMCacheFactory). Caches can be chained: "consumers" lists the caches
// that use this cache as their "source".
struct VMCache {
public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType, uint32 allocationFlags);

	virtual	void				Delete();

	// locking
	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	// reference counting (RefCount() below is for debugging only)
	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);

	inline	VMCacheRef*			CacheRef() const	{ return fCacheRef; }

	// page event waiting/notification (cf. the PAGE_EVENT_* constants);
	// NotifyPageEvents() only calls out when there actually are waiters
			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
	inline	void				MarkPageUnbusy(vm_page* page);

	// page management
			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);

	// cache hierarchy
			void				AddConsumer(VMCache* consumer);

	// area management
			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
	virtual	status_t			Resize(off_t newSize, int priority);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations -- overridden by the cache type subclasses
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const generic_io_vec *vecs,
									size_t count,uint32 flags,
									generic_size_t *_numBytes);
	virtual	status_t			Write(off_t offset, const generic_io_vec *vecs,
									size_t count, uint32 flags,
									generic_size_t *_numBytes);
	virtual	status_t			WriteAsync(off_t offset,
									const generic_io_vec* vecs, size_t count,
									generic_size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction

	virtual	status_t			Fault(struct VMAddressSpace *aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

	// debugging support
	virtual	bool				DebugHasPage(off_t offset);
			vm_page*			DebugLookupPage(off_t offset);

	virtual	void				Dump(bool showPages) const;

public:
			VMArea*				areas;
				// areas mapping this cache -- NOTE(review): presumably the
				// head of an intrusive list; confirm link field in VMArea
			list_link			consumer_link;
			list				consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
				// this cache's pages, keyed by vm_page::cache_offset
			VMCache*			source;
				// the cache this cache uses as its source (cf. consumers)
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				type : 6;
				// the cacheType passed to Init() (cf. CACHE_TYPE_*)

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer(bool consumerLocked);
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
				// guards the cache's mutable state (cf. Lock()/Unlock())
			PageEventWaiter*	fPageEventWaiters;
				// waiters served by WaitForPageEvents()/_NotifyPageEvents()
			void*				fUserData;
			VMCacheRef*			fCacheRef;
};
200 
201 
202 #if DEBUG_CACHE_LIST
203 extern VMCache* gDebugCacheList;
204 #endif
205 
206 
// Static factory for the concrete VMCache subclasses -- one Create*()
// method per CACHE_TYPE_* constant. On success the new cache is
// presumably handed back via the "cache" out parameter.
class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable,
								int priority);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(int priority, VMCache*& cache);
};
219 
220 
221 
222 bool
223 VMCache::Lock()
224 {
225 	return mutex_lock(&fLock) == B_OK;
226 }
227 
228 
229 bool
230 VMCache::TryLock()
231 {
232 	return mutex_trylock(&fLock) == B_OK;
233 }
234 
235 
236 bool
237 VMCache::SwitchLock(mutex* from)
238 {
239 	return mutex_switch_lock(from, &fLock) == B_OK;
240 }
241 
242 
243 bool
244 VMCache::SwitchFromReadLock(rw_lock* from)
245 {
246 	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
247 }
248 
249 
/*!	Asserts that the cache's mutex is held (cf. ASSERT_LOCKED_MUTEX() --
	presumably a no-op in non-debug builds).
*/
void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}
255 
256 
257 void
258 VMCache::AcquireRefLocked()
259 {
260 	ASSERT_LOCKED_MUTEX(&fLock);
261 
262 	fRefCount++;
263 }
264 
265 
266 void
267 VMCache::AcquireRef()
268 {
269 	Lock();
270 	fRefCount++;
271 	Unlock();
272 }
273 
274 
275 void
276 VMCache::ReleaseRefLocked()
277 {
278 	ASSERT_LOCKED_MUTEX(&fLock);
279 
280 	fRefCount--;
281 }
282 
283 
284 void
285 VMCache::ReleaseRef()
286 {
287 	Lock();
288 	fRefCount--;
289 	Unlock();
290 }
291 
292 
/*!	Decrements the cache's reference count and unlocks the cache in one go.
	The caller must hold the cache's lock (asserted by ReleaseRefLocked()).
	\param consumerLocked passed on to Unlock() -- see there.
*/
void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}
299 
300 
/*!	Clears the busy flag of \a page -- which must currently be busy
	(asserted) -- and notifies any threads waiting on the page via
	NotifyPageEvents(PAGE_EVENT_NOT_BUSY).
	NOTE(review): presumably requires the cache to be locked (the waiter
	list is checked without further synchronization) -- confirm at callers.
*/
void
VMCache::MarkPageUnbusy(vm_page* page)
{
	ASSERT(page->busy);
	page->busy = false;
	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}
308 
309 
// C-linkage interface to the cache subsystem (usable from C code).
#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);
	// NOTE(review): by its name, returns the given page's cache locked and
	// referenced; "dontWait" presumably makes the attempt non-blocking --
	// confirm against the implementation.

#ifdef __cplusplus
}
#endif
322 
323 
324 #endif	/* _KERNEL_VM_VM_CACHE_H */
325