// xref: /haiku/src/system/kernel/fs/Vnode.h (revision d0ac609964842f8cdb6d54b3c539c6c15293e172)
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef VNODE_H
#define VNODE_H


#include <fs_interface.h>

#include <util/DoublyLinkedList.h>
#include <util/list.h>

#include <lock.h>
#include <thread.h>


struct advisory_locking;
struct file_descriptor;
struct fs_mount;
struct VMCache;

typedef struct vnode Vnode;


struct vnode : fs_vnode, DoublyLinkedListLinkImpl<vnode> {
			struct vnode*		next;
			VMCache*			cache;
			struct fs_mount*	mount;
			struct vnode*		covered_by;
			struct vnode*		covers;
			struct advisory_locking* advisory_locking;
			struct file_descriptor* mandatory_locked_by;
			list_link			unused_link;
			ino_t				id;
			dev_t				device;
			int32				ref_count;

public:
	inline	bool				IsBusy() const;
	inline	void				SetBusy(bool busy);

	inline	bool				IsRemoved() const;
	inline	void				SetRemoved(bool removed);

	inline	bool				IsUnpublished() const;
	inline	void				SetUnpublished(bool unpublished);

	inline	bool				IsUnused() const;
	inline	void				SetUnused(bool unused);

	inline	bool				IsHot() const;
	inline	void				SetHot(bool hot);

	// setter requires sVnodeLock write-locked, getter is lockless
	inline	bool				IsCovered() const;
	inline	void				SetCovered(bool covered);

	// setter requires sVnodeLock write-locked, getter is lockless
	inline	bool				IsCovering() const;
	inline	void				SetCovering(bool covering);

	inline	uint32				Type() const;
	inline	void				SetType(uint32 type);

	inline	bool				Lock();
	inline	void				Unlock();

	static	void				StaticInit();

private:
	static	const uint32		kFlagsLocked		= 0x00000001;
	static	const uint32		kFlagsWaitingLocker	= 0x00000002;
	static	const uint32		kFlagsBusy			= 0x00000004;
	static	const uint32		kFlagsRemoved		= 0x00000008;
	static	const uint32		kFlagsUnpublished	= 0x00000010;
	static	const uint32		kFlagsUnused		= 0x00000020;
	static	const uint32		kFlagsHot			= 0x00000040;
	static	const uint32		kFlagsCovered		= 0x00000080;
	static	const uint32		kFlagsCovering		= 0x00000100;
	static	const uint32		kFlagsType			= 0xfffff000;
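	// Layout of fFlags: the low bits hold the lock and state flags above
	// (bits 9 through 11 are currently unused), while the bits covered by
	// kFlagsType store the vnode's type (see Type()/SetType() below).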

	static	const uint32		kBucketCount		= 32;

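			// Lock contention is tracked outside of the vnode itself: waiting
			// threads are queued as LockWaiter entries on one of kBucketCount
			// global Buckets, selected by _Bucket() from the vnode's address
			// (presumably in _WaitForLock()/_WakeUpLocker(), which are defined
			// out of line). This keeps the vnode structure small, at the cost
			// of unrelated vnodes occasionally sharing a bucket.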
			struct LockWaiter : DoublyLinkedListLinkImpl<LockWaiter> {
				LockWaiter*		next;
				Thread*			thread;
				struct vnode*	vnode;
			};

			typedef DoublyLinkedList<LockWaiter> LockWaiterList;

			struct Bucket {
				mutex			lock;
				LockWaiterList	waiters;

				Bucket();
			};

private:
	inline	Bucket&				_Bucket() const;

			void				_WaitForLock();
			void				_WakeUpLocker();

private:
			int32				fFlags;

	static	Bucket				sBuckets[kBucketCount];
};


bool
vnode::IsBusy() const
{
	return (fFlags & kFlagsBusy) != 0;
}


void
vnode::SetBusy(bool busy)
{
	if (busy)
		atomic_or(&fFlags, kFlagsBusy);
	else
		atomic_and(&fFlags, ~kFlagsBusy);
}


bool
vnode::IsRemoved() const
{
	return (fFlags & kFlagsRemoved) != 0;
}


void
vnode::SetRemoved(bool removed)
{
	if (removed)
		atomic_or(&fFlags, kFlagsRemoved);
	else
		atomic_and(&fFlags, ~kFlagsRemoved);
}


bool
vnode::IsUnpublished() const
{
	return (fFlags & kFlagsUnpublished) != 0;
}


void
vnode::SetUnpublished(bool unpublished)
{
	if (unpublished)
		atomic_or(&fFlags, kFlagsUnpublished);
	else
		atomic_and(&fFlags, ~kFlagsUnpublished);
}


bool
vnode::IsUnused() const
{
	return (fFlags & kFlagsUnused) != 0;
}


void
vnode::SetUnused(bool unused)
{
	if (unused)
		atomic_or(&fFlags, kFlagsUnused);
	else
		atomic_and(&fFlags, ~kFlagsUnused);
}


bool
vnode::IsHot() const
{
	return (fFlags & kFlagsHot) != 0;
}


void
vnode::SetHot(bool hot)
{
	if (hot)
		atomic_or(&fFlags, kFlagsHot);
	else
		atomic_and(&fFlags, ~kFlagsHot);
}


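// IsCovered()/IsCovering() presumably mirror whether covered_by/covers are
// set, letting readers query the covering state without dereferencing those
// pointers or write-locking sVnodeLock (the getters are lockless, see above).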
bool
vnode::IsCovered() const
{
	return (fFlags & kFlagsCovered) != 0;
}


void
vnode::SetCovered(bool covered)
{
	if (covered)
		atomic_or(&fFlags, kFlagsCovered);
	else
		atomic_and(&fFlags, ~kFlagsCovered);
}


bool
vnode::IsCovering() const
{
	return (fFlags & kFlagsCovering) != 0;
}


void
vnode::SetCovering(bool covering)
{
	if (covering)
		atomic_or(&fFlags, kFlagsCovering);
	else
		atomic_and(&fFlags, ~kFlagsCovering);
}


uint32
vnode::Type() const
{
	return (uint32)fFlags & kFlagsType;
}


void
vnode::SetType(uint32 type)
{
	atomic_and(&fFlags, ~kFlagsType);
	atomic_or(&fFlags, type & kFlagsType);
}
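
// Note: SetType() clears and then sets the type bits in two separate atomic
// operations, so it is not atomic as a whole; presumably it is only called
// while the vnode is still busy (e.g. during publishing). The stored type is
// likely the S_IFMT portion of the node's mode, but that is an assumption not
// stated in this file.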


/*!	Locks the vnode.
	The caller must hold sVnodeLock (at least read locked) and must continue to
	hold it until calling Unlock(). After acquiring the lock the caller is
	allowed write access to the vnode's mutable fields, provided the vnode has
	not been marked busy by someone else.
	Since every locker must hold sVnodeLock at least read locked, write locking
	sVnodeLock by itself grants the same write access to *any* vnode.

	The vnode's lock should be held only for a short time. It may be held while
	acquiring sUnusedVnodesLock.

	\return Always \c true.
*/
bool
vnode::Lock()
{
	if ((atomic_or(&fFlags, kFlagsLocked)
			& (kFlagsLocked | kFlagsWaitingLocker)) != 0) {
		_WaitForLock();
	}

	return true;
}


void
vnode::Unlock()
{
	if ((atomic_and(&fFlags, ~kFlagsLocked) & kFlagsWaitingLocker) != 0)
		_WakeUpLocker();
}
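
// A minimal usage sketch for Lock()/Unlock(), based on the comment above. It
// assumes sVnodeLock is the global rw_lock defined in vfs.cpp and uses the
// rw_lock_read_lock()/rw_lock_read_unlock() API from <lock.h>:
//
//	rw_lock_read_lock(&sVnodeLock);
//	vnode->Lock();
//	if (!vnode->IsBusy())
//		vnode->SetRemoved(true);
//			// ...or any other access to the vnode's mutable fields
//	vnode->Unlock();
//	rw_lock_read_unlock(&sVnodeLock);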


vnode::Bucket&
vnode::_Bucket() const
{
	return sBuckets[((addr_t)this / 64) % kBucketCount];
		// The vnode structure is somewhat larger than 64 bytes (on 32 bit
		// archs), so subsequently allocated vnodes fall into different
		// buckets. How exactly the vnodes are distributed depends on the
		// allocator -- a dedicated slab would be perfect.
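		// For example, two vnodes allocated 128 bytes apart always end up
		// two buckets apart (modulo kBucketCount).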
}


#endif	// VNODE_H