/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef VNODE_H
#define VNODE_H


#include <fs_interface.h>

#include <util/DoublyLinkedList.h>
#include <util/list.h>

#include <lock.h>
#include <thread.h>


struct advisory_locking;
struct file_descriptor;
struct fs_mount;
struct VMCache;

typedef struct vnode Vnode;


struct vnode : fs_vnode, DoublyLinkedListLinkImpl<vnode> {
	struct vnode* hash_next;
	VMCache* cache;
	struct fs_mount* mount;
	struct vnode* covered_by;
	struct vnode* covers;
	struct advisory_locking* advisory_locking;
	struct file_descriptor* mandatory_locked_by;
	DoublyLinkedListLink<struct vnode> unused_link;
	ino_t id;
	dev_t device;
	int32 ref_count;

public:
	inline bool IsBusy() const;
	inline void SetBusy(bool busy);

	inline bool IsRemoved() const;
	inline void SetRemoved(bool removed);

	inline bool IsUnpublished() const;
	inline void SetUnpublished(bool unpublished);

	inline bool IsUnused() const;
	inline void SetUnused(bool unused);

	inline bool IsHot() const;
	inline void SetHot(bool hot);

	// setter requires sVnodeLock write-locked, getter is lockless
	inline bool IsCovered() const;
	inline void SetCovered(bool covered);

	// setter requires sVnodeLock write-locked, getter is lockless
	inline bool IsCovering() const;
	inline void SetCovering(bool covering);

	inline uint32 Type() const;
	inline void SetType(uint32 type);

	inline bool Lock();
	inline void Unlock();

	static void StaticInit();

private:
	static const uint32 kFlagsLocked = 0x00000001;
	static const uint32 kFlagsWaitingLocker = 0x00000002;
	static const uint32 kFlagsBusy = 0x00000004;
	static const uint32 kFlagsRemoved = 0x00000008;
	static const uint32 kFlagsUnpublished = 0x00000010;
	static const uint32 kFlagsUnused = 0x00000020;
	static const uint32 kFlagsHot = 0x00000040;
	static const uint32 kFlagsCovered = 0x00000080;
	static const uint32 kFlagsCovering = 0x00000100;
	static const uint32 kFlagsType = 0xfffff000;
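	// Note: all of the constants above are bit masks for the single fFlags
	// word; the state flags occupy its low 12 bits, while kFlagsType
	// reserves the remaining high bits for the node type (see Type() and
	// SetType() below).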

	static const uint32 kBucketCount = 32;

	struct LockWaiter : DoublyLinkedListLinkImpl<LockWaiter> {
		Thread* thread;
		struct vnode* vnode;
	};

	typedef DoublyLinkedList<LockWaiter> LockWaiterList;

	struct Bucket {
		mutex lock;
		LockWaiterList waiters;

		Bucket();
	};

private:
	inline Bucket& _Bucket() const;

	void _WaitForLock();
	void _WakeUpLocker();

private:
	int32 fFlags;

	static Bucket sBuckets[kBucketCount];
};


bool
vnode::IsBusy() const
{
	return (fFlags & kFlagsBusy) != 0;
}


void
vnode::SetBusy(bool busy)
{
	if (busy)
		atomic_or(&fFlags, kFlagsBusy);
	else
		atomic_and(&fFlags, ~kFlagsBusy);
}


bool
vnode::IsRemoved() const
{
	return (fFlags & kFlagsRemoved) != 0;
}


void
vnode::SetRemoved(bool removed)
{
	if (removed)
		atomic_or(&fFlags, kFlagsRemoved);
	else
		atomic_and(&fFlags, ~kFlagsRemoved);
}


bool
vnode::IsUnpublished() const
{
	return (fFlags & kFlagsUnpublished) != 0;
}


void
vnode::SetUnpublished(bool unpublished)
{
	if (unpublished)
		atomic_or(&fFlags, kFlagsUnpublished);
	else
		atomic_and(&fFlags, ~kFlagsUnpublished);
}


bool
vnode::IsUnused() const
{
	return (fFlags & kFlagsUnused) != 0;
}


void
vnode::SetUnused(bool unused)
{
	if (unused)
		atomic_or(&fFlags, kFlagsUnused);
	else
		atomic_and(&fFlags, ~kFlagsUnused);
}


bool
vnode::IsHot() const
{
	return (fFlags & kFlagsHot) != 0;
}


void
vnode::SetHot(bool hot)
{
	if (hot)
		atomic_or(&fFlags, kFlagsHot);
	else
		atomic_and(&fFlags, ~kFlagsHot);
}


bool
vnode::IsCovered() const
{
	return (fFlags & kFlagsCovered) != 0;
}


void
vnode::SetCovered(bool covered)
{
	if (covered)
		atomic_or(&fFlags, kFlagsCovered);
	else
		atomic_and(&fFlags, ~kFlagsCovered);
}


bool
vnode::IsCovering() const
{
	return (fFlags & kFlagsCovering) != 0;
}


void
vnode::SetCovering(bool covering)
{
	if (covering)
		atomic_or(&fFlags, kFlagsCovering);
	else
		atomic_and(&fFlags, ~kFlagsCovering);
}


uint32
vnode::Type() const
{
	return (uint32)fFlags & kFlagsType;
}


void
vnode::SetType(uint32 type)
{
	atomic_and(&fFlags, ~kFlagsType);
	atomic_or(&fFlags, type & kFlagsType);
}
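

/*	A usage sketch, not something this header itself guarantees: the value
	stored via SetType() is assumed to be the S_IFMT portion of the node's
	stat mode, so the standard mode macros can be applied to Type():

		vnode->SetType(st.st_mode & S_IFMT);
		if (S_ISDIR(vnode->Type()))
			; // directory-specific handling
*/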


/*!	Locks the vnode.
	The caller must hold sVnodeLock (at least read locked) and must continue to
	hold it until calling Unlock(). After acquiring the lock the caller is
	granted write access to the vnode's mutable fields, provided the vnode
	hasn't been marked busy by someone else.
	Since every locker has to hold sVnodeLock at least read locked, write
	locking sVnodeLock grants the same write access to *any* vnode.

	The vnode's lock should be held only for a short time. It can be held over
	sUnusedVnodesLock.

	\return Always \c true.
*/
bool
vnode::Lock()
{
	if ((atomic_or(&fFlags, kFlagsLocked)
			& (kFlagsLocked | kFlagsWaitingLocker)) != 0) {
		_WaitForLock();
	}

	return true;
}


void
vnode::Unlock()
{
	if ((atomic_and(&fFlags, ~kFlagsLocked) & kFlagsWaitingLocker) != 0)
		_WakeUpLocker();
}
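

/*	A minimal usage sketch (illustrative only; sVnodeLock and the rw-lock
	calls around it live in the VFS implementation, not in this header, and
	are assumptions here):

		rw_lock_read_lock(&sVnodeLock);
		vnode->Lock();
		if (!vnode->IsBusy()) {
			// ... briefly modify the vnode's mutable fields ...
		}
		vnode->Unlock();
		rw_lock_read_unlock(&sVnodeLock);
*/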


vnode::Bucket&
vnode::_Bucket() const
{
	return sBuckets[((addr_t)this / 64) % kBucketCount];
		// The vnode structure is somewhat larger than 64 bytes (on 32 bit
		// archs), so subsequently allocated vnodes fall into different
		// buckets. How exactly the vnodes are distributed depends on the
		// allocator -- a dedicated slab would be perfect.
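		// For example (purely illustrative addresses): vnodes allocated at
		// 0x1000 and 0x1080 map to buckets (0x1000 / 64) % 32 = 0 and
		// (0x1080 / 64) % 32 = 2, respectively.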
}


#endif	// VNODE_H