/*
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef UNUSED_VNODES_H
#define UNUSED_VNODES_H


#include <algorithm>

#include <util/AutoLock.h>
#include <util/list.h>

#include <low_resource_manager.h>

#include "Vnode.h"


const static uint32 kMaxUnusedVnodes = 8192;
	// This is the maximum number of unused vnodes that the system will keep
	// around. It is a weak limit: as long as there is enough memory left,
	// unused vnodes won't get flushed even when the limit is exceeded. The
	// value may be chosen with respect to the available memory or refined by
	// some timestamp/frequency heuristic.


/*!	\brief Guards sUnusedVnodeList and sUnusedVnodes.

	Must have at least a read-lock of sHotVnodesLock when acquiring!
*/
static spinlock sUnusedVnodesLock = B_SPINLOCK_INITIALIZER;
typedef DoublyLinkedList<Vnode,
	DoublyLinkedListMemberGetLink<Vnode, &Vnode::unused_link> > UnusedVnodeList;
static UnusedVnodeList sUnusedVnodeList;
static uint32 sUnusedVnodes = 0;

static const int32 kMaxHotVnodes = 1024;
static rw_lock sHotVnodesLock = RW_LOCK_INITIALIZER("hot vnodes");
static Vnode* sHotVnodes[kMaxHotVnodes];
static int32 sNextHotVnodeIndex = 0;
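	// Vnodes that have recently become unused are not appended to
	// sUnusedVnodeList right away. They are first parked in the fixed-size
	// sHotVnodes array ("hot" vnodes), so that vnodes which become used
	// again shortly afterwards don't have to be linked into and out of the
	// list. Only when the array runs full is it flushed, moving the entries
	// that are still unused to sUnusedVnodeList.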

static const int32 kUnusedVnodesCheckInterval = 64;
static int32 sUnusedVnodesCheckCount = 0;
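	// Incremented on every vnode_unused() call. When it reaches
	// kUnusedVnodesCheckInterval, the number of unused vnodes is checked
	// against kMaxUnusedVnodes and, if too high, the caller is asked to free
	// some of them.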


/*!	Must be called with sHotVnodesLock write-locked.
*/
static void
flush_hot_vnodes_locked()
{
	// Since sUnusedVnodesLock is always acquired after sHotVnodesLock,
	// we can safely hold it for the whole duration of the flush.
	// We don't want to be descheduled while holding the write-lock, anyway.
	InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);

	int32 count = std::min(sNextHotVnodeIndex, kMaxHotVnodes);
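		// sNextHotVnodeIndex may have been incremented past kMaxHotVnodes by
		// concurrent vnode_unused() callers, hence the clamping.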
	for (int32 i = 0; i < count; i++) {
		Vnode* vnode = sHotVnodes[i];
		if (vnode == NULL)
			continue;

		if (vnode->IsHot()) {
			if (vnode->IsUnused()) {
				sUnusedVnodeList.Add(vnode);
				sUnusedVnodes++;
			}
			vnode->SetHot(false);
		}

		sHotVnodes[i] = NULL;
	}

	unusedLocker.Unlock();

	sNextHotVnodeIndex = 0;
}


/*!	To be called when the vnode's ref count drops to 0.
	Must be called with sVnodeLock at least read-locked and the vnode locked.
	\param vnode The vnode.
	\return \c true, if the caller should trigger unused vnode freeing.
*/
static bool
vnode_unused(Vnode* vnode)
{
	ReadLocker hotReadLocker(sHotVnodesLock);

	vnode->SetUnused(true);

	bool result = false;
	int32 checkCount = atomic_add(&sUnusedVnodesCheckCount, 1);
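		// atomic_add() returns the previous value, so at most one caller per
		// interval enters the check below.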
	if (checkCount == kUnusedVnodesCheckInterval) {
		uint32 unusedCount = atomic_get((int32*)&sUnusedVnodes);
		if (unusedCount > kMaxUnusedVnodes
			&& low_resource_state(
					B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY)
				!= B_NO_LOW_RESOURCE) {
			// there are too many unused vnodes -- tell the caller to free the
			// oldest ones
			result = true;
		} else {
			// nothing urgent -- reset the counter and re-check then
			atomic_set(&sUnusedVnodesCheckCount, 0);
		}
	}

	// nothing to do, if the node is already hot
	if (vnode->IsHot())
		return result;

	// no -- enter it
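	// atomic_add() hands each concurrent caller a unique slot, so the stores
	// into sHotVnodes cannot race with one another, even under the read lock.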
	int32 index = atomic_add(&sNextHotVnodeIndex, 1);
	if (index < kMaxHotVnodes) {
		vnode->SetHot(true);
		sHotVnodes[index] = vnode;
		return result;
	}

	// the array is full -- it has to be emptied
	hotReadLocker.Unlock();
	WriteLocker hotWriteLocker(sHotVnodesLock);
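		// An rw_lock cannot be upgraded atomically, so the state may have
		// changed between releasing the read lock and acquiring the write
		// lock and must be re-checked.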

	// unless someone was faster than we were, we have to flush the array
	if (sNextHotVnodeIndex >= kMaxHotVnodes)
		flush_hot_vnodes_locked();

	// enter the vnode
	index = sNextHotVnodeIndex++;
	vnode->SetHot(true);
	sHotVnodes[index] = vnode;

	return result;
}
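
// A minimal, hypothetical sketch of the intended call pattern when a vnode's
// reference count drops to 0 (the actual caller lives in vfs.cpp;
// free_unused_vnodes() stands in for whatever reclaim routine is used):
//
//	if (vnode_unused(vnode))
//		free_unused_vnodes();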


/*!	To be called when the vnode's ref count is changed from 0 to 1.
	Must be called with sVnodeLock at least read-locked and the vnode locked.
	\param vnode The vnode.
*/
static void
vnode_used(Vnode* vnode)
{
	ReadLocker hotReadLocker(sHotVnodesLock);

	if (!vnode->IsUnused())
		return;

	vnode->SetUnused(false);

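	// Hot vnodes have not been moved to sUnusedVnodeList yet, so only
	// non-hot vnodes need to be unlinked from the list.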
	if (!vnode->IsHot()) {
		InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
		sUnusedVnodeList.Remove(vnode);
		sUnusedVnodes--;
	}
}


/*!	To be called when the vnode is about to be freed.
	Must be called with sVnodeLock at least read-locked and the vnode locked.
	\param vnode The vnode.
*/
static void
vnode_to_be_freed(Vnode* vnode)
{
	ReadLocker hotReadLocker(sHotVnodesLock);

	if (vnode->IsHot()) {
		// node is hot -- remove it from the array
		// TODO: Maybe it would be better to completely flush the array while
		// we're at it?
		int32 count = atomic_get(&sNextHotVnodeIndex);
		count = std::min(count, kMaxHotVnodes);
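			// Clearing the slot is sufficient, since flush_hot_vnodes_locked()
			// skips NULL entries.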
		for (int32 i = 0; i < count; i++) {
			if (sHotVnodes[i] == vnode) {
				sHotVnodes[i] = NULL;
				break;
			}
		}
	} else if (vnode->IsUnused()) {
		InterruptsSpinLocker unusedLocker(sUnusedVnodesLock);
		sUnusedVnodeList.Remove(vnode);
		sUnusedVnodes--;
	}

	vnode->SetUnused(false);
}


static inline void
flush_hot_vnodes()
{
	WriteLocker hotWriteLocker(sHotVnodesLock);
	flush_hot_vnodes_locked();
}


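/*!	Suppresses the unused-vnodes check in vnode_unused() while a check is in
	progress: with the counter set past the interval, the equality test in
	vnode_unused() cannot fire until the counter is reset again.
*/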
static inline void
unused_vnodes_check_started()
{
	atomic_set(&sUnusedVnodesCheckCount, kUnusedVnodesCheckInterval + 1);
}


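/*!	Resets the counter, re-enabling the periodic check in vnode_unused().
*/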
static inline void
unused_vnodes_check_done()
{
	atomic_set(&sUnusedVnodesCheckCount, 0);
}


#endif	// UNUSED_VNODES_H