/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;


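// The type of backing store a cache represents; each value corresponds to one
// of the VMCacheFactory::Create*Cache() methods below (anonymous RAM/swap
// memory, vnode-backed file data, device memory, and the backing-store-less
// null cache, respectively).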
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};


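// Glue that lets vm_page be stored in an IteratableSplayTree: pages are keyed
// and ordered by their cache_offset field, linked into the tree through
// cache_link, and chained through cache_next for list-style iteration. The
// resulting tree type, VMCachePagesTree, holds a cache's resident pages (see
// VMCache::pages below).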
struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;

struct VMCache {
public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType);

	virtual	void				Delete();

	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock();
	inline	void				AssertLocked();

	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock();

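			// Page event waiting: a thread that needs to wait e.g. for a busy
			// page calls WaitForPageEvents() with the events it is interested
			// in ("relock" presumably controls whether the cache lock is
			// reacquired after waking up). NotifyPageEvents() is the matching
			// wake-up; it is inlined so that the common case of no waiters
			// costs only a NULL check before calling _NotifyPageEvents().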
			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }

			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);

			void				AddConsumer(VMCache* consumer);

			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment);
			status_t			Resize(off_t newSize);

			status_t			FlushAndRemoveAllPages();

			// for debugging only
			mutex*				GetLock()
									{ return &fLock; }
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
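	// These are the hooks the concrete cache types created by VMCacheFactory
	// (below) override to talk to their respective backing store; see also
	// the store reference methods further down.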
	virtual	status_t			Commit(off_t size);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const iovec *vecs,
									size_t count, uint32 flags,
									size_t *_numBytes);
	virtual	status_t			Write(off_t offset, const iovec *vecs,
									size_t count, uint32 flags,
									size_t *_numBytes);
	virtual	status_t			WriteAsync(off_t offset, const iovec* vecs,
									size_t count, size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction

	virtual	status_t			Fault(struct VMAddressSpace *aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

public:
			VMArea*				areas;
			list_link			consumer_link;
			list				consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				scan_skip : 1;
			uint32				type : 6;

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer();
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;
};
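
// Rough usage sketch (illustrative only, not taken from the kernel sources):
// page-level operations are performed with the cache locked, typically while
// also holding a reference so the cache cannot go away under the caller.
// "offset" and "newPage" are hypothetical caller-provided values here.
//
//	cache->AcquireRef();
//	cache->Lock();
//	vm_page* page = cache->LookupPage(offset);
//	if (page == NULL)
//		cache->InsertPage(newPage, offset);
//	cache->ReleaseRefAndUnlock();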


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(VMCache*& cache);
};
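
// Illustrative sketch only (not from the kernel sources): creating a
// swappable anonymous cache with no overcommit, precommitted pages, or guard
// pages. The factory presumably hands the new cache back with one reference
// held for the caller, to be dropped with ReleaseRef() when no longer needed.
//
//	VMCache* cache;
//	status_t error = VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0,
//		true);
//	if (error != B_OK)
//		return error;
//	// ... use the cache ...
//	cache->ReleaseRef();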



bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock()
{
	ReleaseRefLocked();
	Unlock();
}


#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
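// Presumably, as the name suggests, this looks up the cache the given page
// belongs to and returns it with a reference acquired and its lock held
// (NULL on failure); "dontWait" should make it fail rather than block. A
// hypothetical caller would then release it again via ReleaseRefAndUnlock():
//
//	VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
//	if (cache != NULL) {
//		// ... work on the page while the cache is locked ...
//		cache->ReleaseRefAndUnlock();
//	}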
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */