/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "EntryCache.h"

#include <new>

#include <stdlib.h>
#include <string.h>

#include <vm/vm.h>
#include <slab/Slab.h>

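// The "index" field of an entry tracks its slot in its generation's entries
// array. Two special values are used: kEntryNotInArray marks an entry that is
// (temporarily) not stored in any generation array, and kEntryRemoved marks an
// entry that was removed from the hash table while another thread was still
// moving it between generations; that thread then frees it.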
static const int32 kEntryNotInArray = -1;
static const int32 kEntryRemoved = -2;


// #pragma mark - EntryCacheGeneration


EntryCacheGeneration::EntryCacheGeneration()
	:
	next_index(0),
	entries(NULL)
{
}


EntryCacheGeneration::~EntryCacheGeneration()
{
	delete[] entries;
}


status_t
EntryCacheGeneration::Init(int32 entriesSize)
{
	entries_size = entriesSize;
	entries = new(std::nothrow) EntryCacheEntry*[entries_size];
	if (entries == NULL)
		return B_NO_MEMORY;

	memset(entries, 0, sizeof(EntryCacheEntry*) * entries_size);
	return B_OK;
}


// #pragma mark - EntryCache


EntryCache::EntryCache()
	:
	fGenerationCount(0),
	fGenerations(NULL),
	fCurrentGeneration(0)
{
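	// fLock protects the entry hash table and the generation arrays. Lookup()
	// takes it only for reading and coordinates the moving of entries between
	// generations via atomic operations.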
	rw_lock_init(&fLock, "entry cache");

	new(&fEntries) EntryTable;
}


EntryCache::~EntryCache()
{
	// delete entries
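	// Clear(true) unlinks all remaining entries from the hash table and
	// returns them as a singly linked list chained through hash_link.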
	EntryCacheEntry* entry = fEntries.Clear(true);
	while (entry != NULL) {
		EntryCacheEntry* next = entry->hash_link;
		free(entry);
		entry = next;
	}

	delete[] fGenerations;

	rw_lock_destroy(&fLock);
}


status_t
EntryCache::Init()
{
	status_t error = fEntries.Init();
	if (error != B_OK)
		return error;

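	// With the defaults below the cache holds 8 generations of 1024 entries
	// each; on systems with at least 1 GB of available memory it holds
	// 16 generations of 8192 entries instead.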
	int32 entriesSize = 1024;
	fGenerationCount = 8;

	// TODO: Choose generation size/count more scientifically?
	// TODO: Add low_resource handler hook?
	if (vm_available_memory() >= (1024 * 1024 * 1024)) {
		entriesSize = 8192;
		fGenerationCount = 16;
	}

	fGenerations = new(std::nothrow) EntryCacheGeneration[fGenerationCount];
	if (fGenerations == NULL)
		return B_NO_MEMORY;

	for (int32 i = 0; i < fGenerationCount; i++) {
		error = fGenerations[i].Init(entriesSize);
		if (error != B_OK)
			return error;
	}

	return B_OK;
}


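/*!	Creates a cache entry mapping \a name in directory \a dirID to \a nodeID.
	If \a missing is \c true, the entry caches a negative lookup, i.e. it
	records that no entry with this name exists. If an entry for the name is
	already cached, it is updated in place and moved to the current generation.
*/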
status_t
EntryCache::Add(ino_t dirID, const char* name, ino_t nodeID, bool missing)
{
	EntryCacheKey key(dirID, name);

	WriteLocker _(fLock);

	if (fGenerationCount == 0)
		return B_NO_MEMORY;

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry != NULL) {
		entry->node_id = nodeID;
		entry->missing = missing;
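		// If the entry still sits in an older generation's array, move it to
		// the current generation. An index < 0 means a concurrent Lookup()
		// has already detached it and will re-add it once it gets the write
		// lock, so it is left alone here.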
		if (entry->generation != fCurrentGeneration) {
			if (entry->index >= 0) {
				fGenerations[entry->generation].entries[entry->index] = NULL;
				_AddEntryToCurrentGeneration(entry);
			}
		}
		return B_OK;
	}

	// Avoid a deadlock in case the system has to wait for free memory.
	const size_t nameLen = strlen(name);
	entry = (EntryCacheEntry*)malloc_etc(sizeof(EntryCacheEntry) + nameLen,
		CACHE_DONT_WAIT_FOR_MEMORY);

	if (entry == NULL)
		return B_NO_MEMORY;

	entry->node_id = nodeID;
	entry->dir_id = dirID;
	entry->hash = EntryCacheKey::Hash(dirID, name);
	entry->missing = missing;
	entry->generation = fCurrentGeneration;
	entry->index = kEntryNotInArray;
	memcpy(entry->name, name, nameLen + 1);

	fEntries.Insert(entry);

	_AddEntryToCurrentGeneration(entry);

	return B_OK;
}


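/*!	Removes the cache entry for \a name in directory \a dirID, if there is
	one. Returns \c B_ENTRY_NOT_FOUND when no matching entry is cached.
*/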
status_t
EntryCache::Remove(ino_t dirID, const char* name)
{
	EntryCacheKey key(dirID, name);

	WriteLocker writeLocker(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	fEntries.Remove(entry);

	if (entry->index >= 0) {
		// remove the entry from its generation and delete it
		fGenerations[entry->generation].entries[entry->index] = NULL;
		free(entry);
	} else {
		// We can't free it, since another thread is about to try to move it
		// to another generation. We mark it removed and the other thread will
		// take care of deleting it.
		entry->index = kEntryRemoved;
	}

	return B_OK;
}


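/*!	Looks up the entry for \a name in directory \a dirID. On a hit,
	\a _nodeID and \a _missing are set from the cached entry, the entry is
	moved to the current generation, and \c true is returned; otherwise
	\c false is returned.
*/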
bool
EntryCache::Lookup(ino_t dirID, const char* name, ino_t& _nodeID,
	bool& _missing)
{
	EntryCacheKey key(dirID, name);

	ReadLocker readLocker(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry == NULL)
		return false;

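	// Claim the entry for the current generation. Since only the read lock is
	// held, atomic_get_and_set() ensures that, if several readers race on the
	// same entry, exactly one of them sees the old generation value and
	// performs the move below.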
	const int32 oldGeneration = atomic_get_and_set(&entry->generation,
		fCurrentGeneration);
	if (oldGeneration == fCurrentGeneration || entry->index < 0) {
		// The entry is already in the current generation or is being moved to
		// it by another thread.
		_nodeID = entry->node_id;
		_missing = entry->missing;
		return true;
	}

	// remove from old generation array
	fGenerations[oldGeneration].entries[entry->index] = NULL;
	entry->index = kEntryNotInArray;

	// add to the current generation
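	// next_index is advanced atomically because only the read lock is held;
	// other lookups may be claiming slots in the same generation concurrently.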
	const int32 index
		= atomic_add(&fGenerations[fCurrentGeneration].next_index, 1);
	if (index < fGenerations[fCurrentGeneration].entries_size) {
		fGenerations[fCurrentGeneration].entries[index] = entry;
		entry->index = index;
		_nodeID = entry->node_id;
		_missing = entry->missing;
		return true;
	}

	// The current generation is full, so we probably need to clear the oldest
	// one to make room. We need the write lock for that.
	readLocker.Unlock();
	WriteLocker writeLocker(fLock);

	if (entry->index == kEntryRemoved) {
		// the entry has been removed in the meantime
		free(entry);
		return false;
	}

	_AddEntryToCurrentGeneration(entry);

	_nodeID = entry->node_id;
	_missing = entry->missing;
	return true;
}


const char*
EntryCache::DebugReverseLookup(ino_t nodeID, ino_t& _dirID)
{
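	// Entries named "." and ".." are skipped, since they do not provide a
	// meaningful name for the node.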
	for (EntryTable::Iterator it = fEntries.GetIterator();
			EntryCacheEntry* entry = it.Next();) {
		if (nodeID == entry->node_id && strcmp(entry->name, ".") != 0
			&& strcmp(entry->name, "..") != 0) {
			_dirID = entry->dir_id;
			return entry->name;
		}
	}

	return NULL;
}


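/*!	Adds \a entry to the current generation's array. If that generation is
	full, the cache advances to the next generation in the ring, which is the
	oldest one: all entries it still holds are removed from the hash table and
	freed before \a entry is stored as its first element.
	The caller must hold the write lock.
*/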
void
EntryCache::_AddEntryToCurrentGeneration(EntryCacheEntry* entry)
{
	ASSERT_WRITE_LOCKED_RW_LOCK(&fLock);

	// the generation might not be full yet
	int32 index = fGenerations[fCurrentGeneration].next_index++;
	if (index < fGenerations[fCurrentGeneration].entries_size) {
		fGenerations[fCurrentGeneration].entries[index] = entry;
		entry->generation = fCurrentGeneration;
		entry->index = index;
		return;
	}

	// we have to clear the oldest generation
	const int32 newGeneration = (fCurrentGeneration + 1) % fGenerationCount;
	for (int32 i = 0; i < fGenerations[newGeneration].entries_size; i++) {
		EntryCacheEntry* otherEntry = fGenerations[newGeneration].entries[i];
		if (otherEntry == NULL)
			continue;

		fGenerations[newGeneration].entries[i] = NULL;
		fEntries.Remove(otherEntry);
		free(otherEntry);
	}

	// set the new generation and add the entry
	fCurrentGeneration = newGeneration;
	fGenerations[newGeneration].entries[0] = entry;
	fGenerations[newGeneration].next_index = 1;
	entry->generation = newGeneration;
	entry->index = 0;
}