/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;


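// The kind of backing store behind a cache. The names mirror the concrete
// caches created by VMCacheFactory at the end of this header: anonymous (RAM)
// memory, vnode/file backed memory, device (physical) memory, and a "null"
// cache without real backing.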
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};


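// Strategy definition telling IteratableSplayTree how to organize a cache's
// vm_pages: pages are keyed by their cache_offset and linked via the
// cache_link/cache_next members embedded in vm_page.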
struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;


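// A VMCache is the reference counted, mutex protected container for the
// vm_pages backing one or more VMAreas. The virtual backing store hooks
// (Commit(), Read(), Write(), Fault(), ...) are overridden by the concrete
// cache types, and the source/consumers links chain caches together; that
// chain is the structure the VM uses for shadow caches and copy-on-write.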
struct VMCache {
public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType);

	virtual	void				Delete();

	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);
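			// The ...Locked() variants require the cache to be locked by the
			// caller; AcquireRef()/ReleaseRef() take the lock themselves (see
			// the inline implementations at the end of this header), and
			// ReleaseRefAndUnlock() drops a reference and the lock in one go.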

			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
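			// A busy page is owned by some thread (e.g. for I/O); other
			// threads can block on it via WaitForPageEvents() with
			// PAGE_EVENT_NOT_BUSY until the owner marks it not busy again
			// and calls NotifyPageEvents().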

			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);
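			// Minimal usage sketch (illustrative only): offsets are byte
			// offsets into the cache, and the cache must be locked:
			//
			//	cache->Lock();
			//	if (vm_page* page = cache->LookupPage(offset)) {
			//		// ... work with the page ...
			//	}
			//	cache->Unlock();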

			void				AddConsumer(VMCache* consumer);

			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment);
			status_t			Resize(off_t newSize);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			mutex*				GetLock()
									{ return &fLock; }
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
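	// These virtual hooks are implemented by the concrete cache types
	// (anonymous/RAM, vnode, device, null) created via VMCacheFactory below;
	// the base class only provides placeholder defaults.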
	virtual	status_t			Commit(off_t size);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const iovec *vecs,
									size_t count, uint32 flags,
									size_t *_numBytes);
	virtual	status_t			Write(off_t offset, const iovec *vecs,
									size_t count, uint32 flags,
									size_t *_numBytes);
	virtual	status_t			WriteAsync(off_t offset, const iovec* vecs,
									size_t count, size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction

	virtual	status_t			Fault(struct VMAddressSpace *aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

public:
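			// The members below are protected by the cache's lock. source
			// points to the cache this one was stacked on top of, and
			// consumers lists the caches stacked on top of this one; these
			// chains implement shadowing/copy-on-write.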
			VMArea*				areas;
			list_link			consumer_link;
			list				consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				scan_skip : 1;
			uint32				type : 6;

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer(bool consumerLocked);
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;
			void*				fUserData;
			VMCacheRef*			fCacheRef;
};


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(VMCache*& cache);
};
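// Illustrative sketch only (parameter values are made up, error handling is
// elided): creating a swappable anonymous cache and dropping the creator's
// reference when done. This assumes a freshly created cache starts out with
// one reference owned by the caller.
//
//	VMCache* cache;
//	if (VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0, true)
//			== B_OK) {
//		cache->Lock();
//		// ... insert pages, attach areas, ...
//		cache->ReleaseRefAndUnlock();
//	}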


bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}


#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);
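// vm_cache_acquire_locked_page_cache() presumably returns the given page's
// cache locked and with a reference acquired (NULL on failure); if dontWait
// is true it only tries the lock instead of blocking.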

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */