/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <debug.h>
#include <kernel.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;
struct ObjectCache;


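// The type of backing store behind a cache: anonymous RAM (possibly swap
// backed), a file (vnode), physical device memory, or no storage at all.
// The type is passed to VMCache::Init() and corresponds to the respective
// VMCacheFactory::Create*Cache() method below.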
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};


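// Slab caches (ObjectCache) from which the VMCacheRef and the various
// VMCache subclasses are allocated.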
extern ObjectCache* gCacheRefObjectCache;
extern ObjectCache* gAnonymousCacheObjectCache;
extern ObjectCache* gAnonymousNoSwapCacheObjectCache;
extern ObjectCache* gVnodeCacheObjectCache;
extern ObjectCache* gDeviceCacheObjectCache;
extern ObjectCache* gNullCacheObjectCache;


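// Tree definition for the splay tree holding a cache's pages, keyed by
// vm_page::cache_offset, i.e. the page's offset into the cache in units of
// whole pages. Being "iteratable", the tree additionally chains its nodes
// through cache_next, so the pages can be traversed in ascending offset
// order.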
struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;


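// A cache is the container for the vm_pages that back one or more VMAreas.
// It is reference counted, and all non-trivial operations on it require its
// lock to be held. Caches are chained via their "source" member (e.g. for
// copy-on-write), with the "consumers" list linking in the opposite
// direction. The backing store operations (Commit(), Read(), Write(), ...)
// are implemented by the type-specific subclasses.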
struct VMCache : public DoublyLinkedListLinkImpl<VMCache> {
public:
	typedef DoublyLinkedList<VMCache> ConsumerList;

public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType, uint32 allocationFlags);

	virtual	void				Delete();

	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);

	inline	VMCacheRef*			CacheRef() const	{ return fCacheRef; }

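			// Threads that need a busy page can wait for it in
			// WaitForPageEvents(); whoever unbusies the page wakes them via
			// NotifyPageEvents() resp. the MarkPageUnbusy() convenience
			// wrapper.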
			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
	inline	void				MarkPageUnbusy(vm_page* page);

			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page, off_t offset);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);

	inline	page_num_t			WiredPagesCount() const;
	inline	void				IncrementWiredPagesCount();
	inline	void				DecrementWiredPagesCount();

	virtual	int32				GuardSize()	{ return 0; }

			void				AddConsumer(VMCache* consumer);

			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
	virtual	status_t			Resize(off_t newSize, int priority);
	virtual	status_t			Rebase(off_t newBase, int priority);
	virtual	status_t			Adopt(VMCache* source, off_t offset, off_t size,
									off_t newOffset);

	virtual	status_t			Discard(off_t offset, off_t size);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const generic_io_vec* vecs,
									size_t count, uint32 flags,
									generic_size_t* _numBytes);
	virtual	status_t			Write(off_t offset, const generic_io_vec* vecs,
									size_t count, uint32 flags,
									generic_size_t* _numBytes);
	virtual	status_t			WriteAsync(off_t offset,
									const generic_io_vec* vecs, size_t count,
									generic_size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction

	virtual	status_t			Fault(struct VMAddressSpace* aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

	virtual	bool				DebugHasPage(off_t offset);
			vm_page*			DebugLookupPage(off_t offset);

	virtual	void				Dump(bool showPages) const;

protected:
	virtual	void				DeleteObject() = 0;

public:
			VMArea*				areas;
			ConsumerList		consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				unmergeable : 1;
			uint32				type : 6;

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page, uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer();
			void				_RemoveConsumer(VMCache* consumer);

			bool				_FreePageRange(VMCachePagesTree::Iterator it,
									page_num_t* toPage);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;
			void*				fUserData;
			VMCacheRef*			fCacheRef;
			page_num_t			fWiredPagesCount;
};


#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


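// Factory for the type-specific caches. A minimal usage sketch (hypothetical,
// error handling omitted): create a swappable anonymous cache without
// precommitted or guard pages, and insert a previously allocated page "page"
// at offset 0. Note that InsertPage() requires the cache lock to be held:
//
//	VMCache* cache;
//	if (VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0, true,
//			VM_PRIORITY_USER) == B_OK) {
//		cache->Lock();
//		cache->InsertPage(page, 0);
//		cache->Unlock();
//	}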
class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable,
								int priority);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(int priority, VMCache*& cache);
};



bool
VMCache::Lock()
{
	return mutex_lock(&fLock) == B_OK;
}


bool
VMCache::TryLock()
{
	return mutex_trylock(&fLock) == B_OK;
}


bool
VMCache::SwitchLock(mutex* from)
{
	return mutex_switch_lock(from, &fLock) == B_OK;
}


bool
VMCache::SwitchFromReadLock(rw_lock* from)
{
	return mutex_switch_from_read_lock(from, &fLock) == B_OK;
}


void
VMCache::AssertLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}


void
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
{
	ReleaseRefLocked();
	Unlock(consumerLocked);
}


void
VMCache::MarkPageUnbusy(vm_page* page)
{
	ASSERT(page->busy);
	page->busy = false;
	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
}


page_num_t
VMCache::WiredPagesCount() const
{
	return fWiredPagesCount;
}


void
VMCache::IncrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount < page_count);

	fWiredPagesCount++;
}


void
VMCache::DecrementWiredPagesCount()
{
	ASSERT(fWiredPagesCount > 0);

	fWiredPagesCount--;
}


// vm_page methods implemented here to avoid VMCache.h inclusion in vm_types.h

inline void
vm_page::IncrementWiredCount()
{
	if (fWiredCount++ == 0)
		cache_ref->cache->IncrementWiredPagesCount();
}


inline void
vm_page::DecrementWiredCount()
{
	ASSERT_PRINT(fWiredCount > 0, "page: %#" B_PRIx64,
		physical_page_number * B_PAGE_SIZE);

	if (--fWiredCount == 0)
		cache_ref->cache->DecrementWiredPagesCount();
}


#ifdef __cplusplus
extern "C" {
#endif

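// Kernel-internal C interface: vm_cache_init() and vm_cache_init_post_heap()
// are called during VM initialization; vm_cache_acquire_locked_page_cache()
// returns the given page's cache with a reference acquired and the lock
// held, or NULL if that isn't possible (with dontWait set it won't block on
// a contended cache lock).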
status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */