xref: /haiku/headers/private/kernel/vm/VMCache.h (revision 6440406a59aec3d5c373459755f3f0ec9f855954)
/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_CACHE_H
#define _KERNEL_VM_VM_CACHE_H


#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_types.h>

#include "kernel_debug_config.h"


struct kernel_args;


enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};

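// Definition for the splay tree that keeps a cache's pages indexed by their
// cache_offset (the page's offset within the cache, counted in pages). The
// tree also threads the pages into a singly linked list via cache_next.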
struct VMCachePagesTreeDefinition {
	typedef page_num_t KeyType;
	typedef vm_page NodeType;

	static page_num_t GetKey(const NodeType* node)
	{
		return node->cache_offset;
	}

	static SplayTreeLink<NodeType>* GetLink(NodeType* node)
	{
		return &node->cache_link;
	}

	static int Compare(page_num_t key, const NodeType* node)
	{
		return key == node->cache_offset ? 0
			: (key < node->cache_offset ? -1 : 1);
	}

	static NodeType** GetListLink(NodeType* node)
	{
		return &node->cache_next;
	}
};

typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;

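// A VMCache manages the set of vm_pages that back one or more VMAreas. Each
// cache is reference counted and protected by its own mutex. Caches can be
// chained: "source" points to the cache this one was derived from, while
// "consumers" lists the caches that in turn use this cache as their source.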
struct VMCache {
public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType);

	virtual	void				Delete();

			bool				Lock()
									{ return mutex_lock(&fLock) == B_OK; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			bool				SwitchLock(mutex* from)
									{ return mutex_switch_lock(from, &fLock)
											== B_OK; }
			void				Unlock();
			void				AssertLocked()
									{ ASSERT_LOCKED_MUTEX(&fLock); }

			void				AcquireRefLocked();
			void				AcquireRef();
			void				ReleaseRefLocked();
			void				ReleaseRef();
			void				ReleaseRefAndUnlock()
									{ ReleaseRefLocked(); Unlock(); }

			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);

			void				AddConsumer(VMCache* consumer);

			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment);
			status_t			Resize(off_t newSize);

			status_t			FlushAndRemoveAllPages();

			// for debugging only
			mutex*				GetLock()
									{ return &fLock; }
			int32				RefCount() const
									{ return fRefCount; }

	// backing store operations
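	// These virtual hooks are implemented by the concrete cache types
	// created by VMCacheFactory (see below).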
	virtual	status_t			Commit(off_t size);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const iovec* vecs,
									size_t count, uint32 flags,
									size_t* _numBytes);
	virtual	status_t			Write(off_t offset, const iovec* vecs,
									size_t count, uint32 flags,
									size_t* _numBytes);
	virtual	status_t			WriteAsync(off_t offset, const iovec* vecs,
									size_t count, size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; } // no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction

	virtual	status_t			Fault(struct VMAddressSpace* aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

public:
			VMArea*				areas;
			list_link			consumer_link;
			list				consumers;
				// list of caches that use this cache as a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
				// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				scan_skip : 1;
			uint32				type : 6;
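				// one of the CACHE_TYPE_* constants above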

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer();
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
};

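// Minimal, illustrative sketch of the lock/reference discipline around a
// cache (not taken from a real call site; "cache" and "offset" are
// hypothetical locals):
//
//	cache->AcquireRef();
//	cache->Lock();
//	vm_page* page = cache->LookupPage(offset);
//	if (page != NULL) {
//		// work with the page while the cache is locked
//	}
//	cache->ReleaseRefAndUnlock();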

#if DEBUG_CACHE_LIST
extern VMCache* gDebugCacheList;
#endif


class VMCacheFactory {
public:
	static	status_t		CreateAnonymousCache(VMCache*& cache,
								bool canOvercommit, int32 numPrecommittedPages,
								int32 numGuardPages, bool swappable);
	static	status_t		CreateVnodeCache(VMCache*& cache,
								struct vnode* vnode);
	static	status_t		CreateDeviceCache(VMCache*& cache,
								addr_t baseAddress);
	static	status_t		CreateNullCache(VMCache*& cache);
};
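
// Minimal usage sketch for the factory (illustrative only; the argument
// values are arbitrary examples, not taken from real call sites):
//
//	VMCache* cache;
//	status_t error = VMCacheFactory::CreateAnonymousCache(cache,
//		false /* canOvercommit */, 0 /* numPrecommittedPages */,
//		0 /* numGuardPages */, true /* swappable */);
//	if (error != B_OK)
//		return error;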


#ifdef __cplusplus
extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_CACHE_H */