xref: /haiku/src/system/kernel/fs/EntryCache.cpp (revision abc06f6bc23f999730078f98d26776499b8eabb7)
1 /*
2  * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "EntryCache.h"
8 
9 #include <new>
10 #include <vm/vm.h>
11 #include <slab/Slab.h>
12 
13 
14 static const int32 kEntryNotInArray = -1;
15 static const int32 kEntryRemoved = -2;
16 
17 
18 // #pragma mark - EntryCacheGeneration
19 
20 
21 EntryCacheGeneration::EntryCacheGeneration()
22 	:
23 	next_index(0),
24 	entries(NULL)
25 {
26 }
27 
28 
29 EntryCacheGeneration::~EntryCacheGeneration()
30 {
31 	delete[] entries;
32 }
33 
34 
35 status_t
36 EntryCacheGeneration::Init(int32 entriesSize)
37 {
38 	entries_size = entriesSize;
39 	entries = new(std::nothrow) EntryCacheEntry*[entries_size];
40 	if (entries == NULL)
41 		return B_NO_MEMORY;
42 
43 	memset(entries, 0, sizeof(EntryCacheEntry*) * entries_size);
44 	return B_OK;
45 }
46 
47 
48 // #pragma mark - EntryCache
49 
50 
/*!	Constructs an empty entry cache: no generations allocated yet (that
	happens in Init()) and an initialized, empty hash table.
*/
EntryCache::EntryCache()
	:
	fGenerationCount(0),
	fGenerations(NULL),
	fCurrentGeneration(0)
{
	rw_lock_init(&fLock, "entry cache");

	// NOTE(review): placement-new on an already-constructed member —
	// presumably to (re)run the table's constructor explicitly in a
	// kernel early-init context; confirm against EntryTable's declaration
	// in EntryCache.h.
	new(&fEntries) EntryTable;
}
61 
62 
63 EntryCache::~EntryCache()
64 {
65 	// delete entries
66 	EntryCacheEntry* entry = fEntries.Clear(true);
67 	while (entry != NULL) {
68 		EntryCacheEntry* next = entry->hash_link;
69 		free(entry);
70 		entry = next;
71 	}
72 	delete[] fGenerations;
73 
74 	rw_lock_destroy(&fLock);
75 }
76 
77 
78 status_t
79 EntryCache::Init()
80 {
81 	status_t error = fEntries.Init();
82 	if (error != B_OK)
83 		return error;
84 
85 	int32 entriesSize = 1024;
86 	fGenerationCount = 8;
87 
88 	// TODO: Choose generation size/count more scientifically?
89 	// TODO: Add low_resource handler hook?
90 	if (vm_available_memory() >= (1024*1024*1024)) {
91 		entriesSize = 8096;
92 		fGenerationCount = 16;
93 	}
94 
95 	fGenerations = new(std::nothrow) EntryCacheGeneration[fGenerationCount];
96 	for (int32 i = 0; i < fGenerationCount; i++) {
97 		error = fGenerations[i].Init(entriesSize);
98 		if (error != B_OK)
99 			return error;
100 	}
101 
102 	return B_OK;
103 }
104 
105 
/*!	Adds (or refreshes) the cache entry for \a name in directory \a dirID.
	\param dirID Node ID of the containing directory.
	\param name Entry name; copied into the entry's trailing buffer.
	\param nodeID Node ID the entry resolves to.
	\param missing Whether this is a negative entry (name known absent).
	\return \c B_OK on success, \c B_NO_MEMORY if the cache is
		uninitialized or the allocation failed.
*/
status_t
EntryCache::Add(ino_t dirID, const char* name, ino_t nodeID, bool missing)
{
	EntryCacheKey key(dirID, name);

	WriteLocker _(fLock);

	// Init() not run or failed — no generations to put the entry into.
	if (fGenerationCount == 0)
		return B_NO_MEMORY;

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry != NULL) {
		// Entry already cached: just refresh its payload, and move it to
		// the current generation so it isn't evicted as stale.
		entry->node_id = nodeID;
		entry->missing = missing;
		if (entry->generation != fCurrentGeneration) {
			// index < 0 means another thread is mid-move (see Lookup());
			// leave it alone — that thread will place the entry.
			if (entry->index >= 0) {
				fGenerations[entry->generation].entries[entry->index] = NULL;
				_AddEntryToCurrentGeneration(entry);
			}
		}
		return B_OK;
	}

	// Avoid deadlock if system had to wait for free memory
	entry = (EntryCacheEntry*)malloc_etc(sizeof(EntryCacheEntry) + strlen(name),
		CACHE_DONT_WAIT_FOR_MEMORY);

	if (entry == NULL)
		return B_NO_MEMORY;

	entry->node_id = nodeID;
	entry->dir_id = dirID;
	entry->missing = missing;
	entry->generation = fCurrentGeneration;
	entry->index = kEntryNotInArray;
	// +strlen(name) above leaves room for the name; the terminating byte
	// presumably comes from EntryCacheEntry's trailing name member — see
	// EntryCache.h.
	strcpy(entry->name, name);

	fEntries.Insert(entry);

	_AddEntryToCurrentGeneration(entry);

	return B_OK;
}
149 
150 
/*!	Removes the cache entry for \a name in directory \a dirID, if present.
	\return \c B_OK if the entry was found and removed,
		\c B_ENTRY_NOT_FOUND otherwise.
*/
status_t
EntryCache::Remove(ino_t dirID, const char* name)
{
	EntryCacheKey key(dirID, name);

	WriteLocker writeLocker(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	fEntries.Remove(entry);

	if (entry->index >= 0) {
		// remove the entry from its generation and delete it
		fGenerations[entry->generation].entries[entry->index] = NULL;
		free(entry);
	} else {
		// We can't free it, since another thread is about to try to move it
		// to another generation. We mark it removed and the other thread will
		// take care of deleting it.
		// (That thread checks for kEntryRemoved once it holds the write
		// lock — see Lookup().)
		entry->index = kEntryRemoved;
	}

	return B_OK;
}
177 
178 
/*!	Looks up the entry for \a name in directory \a dirID.
	On a hit the entry is promoted to the current generation (LRU-style),
	which may require upgrading from the read to the write lock.
	\param _nodeID Set to the entry's node ID on a hit.
	\param _missing Set to whether the entry is a negative entry.
	\return \c true on a hit, \c false otherwise.
*/
bool
EntryCache::Lookup(ino_t dirID, const char* name, ino_t& _nodeID,
	bool& _missing)
{
	EntryCacheKey key(dirID, name);

	ReadLocker readLocker(fLock);

	EntryCacheEntry* entry = fEntries.Lookup(key);
	if (entry == NULL)
		return false;

	// Atomically claim the move to the current generation: whichever
	// thread swaps in fCurrentGeneration first is responsible for it.
	const int32 oldGeneration = atomic_get_and_set(&entry->generation,
		fCurrentGeneration);
	if (oldGeneration == fCurrentGeneration || entry->index < 0) {
		// The entry is already in the current generation or is being moved to
		// it by another thread.
		_nodeID = entry->node_id;
		_missing = entry->missing;
		return true;
	}

	// remove from old generation array
	fGenerations[oldGeneration].entries[entry->index] = NULL;
	entry->index = kEntryNotInArray;

	// add to the current generation
	const int32 index = atomic_add(&fGenerations[fCurrentGeneration].next_index, 1);
	if (index < fGenerations[fCurrentGeneration].entries_size) {
		fGenerations[fCurrentGeneration].entries[index] = entry;
		entry->index = index;
		_nodeID = entry->node_id;
		_missing = entry->missing;
		return true;
	}

	// The current generation is full, so we probably need to clear the oldest
	// one to make room. We need the write lock for that.
	readLocker.Unlock();
	WriteLocker writeLocker(fLock);

	// While we were unlocked, Remove() may have marked the entry; we own
	// the cleanup in that case (see Remove()).
	if (entry->index == kEntryRemoved) {
		// the entry has been removed in the meantime
		free(entry);
		return false;
	}

	_AddEntryToCurrentGeneration(entry);

	_nodeID = entry->node_id;
	_missing = entry->missing;
	return true;
}
232 
233 
234 const char*
235 EntryCache::DebugReverseLookup(ino_t nodeID, ino_t& _dirID)
236 {
237 	for (EntryTable::Iterator it = fEntries.GetIterator();
238 			EntryCacheEntry* entry = it.Next();) {
239 		if (nodeID == entry->node_id && strcmp(entry->name, ".") != 0
240 				&& strcmp(entry->name, "..") != 0) {
241 			_dirID = entry->dir_id;
242 			return entry->name;
243 		}
244 	}
245 
246 	return NULL;
247 }
248 
249 
/*!	Places \a entry into the current generation, advancing to (and
	evicting) the oldest generation if the current one is full.
	The caller must hold the write lock and must already have removed
	\a entry from any generation slot it occupied.
*/
void
EntryCache::_AddEntryToCurrentGeneration(EntryCacheEntry* entry)
{
	ASSERT_WRITE_LOCKED_RW_LOCK(&fLock);

	// the generation might not be full yet
	int32 index = fGenerations[fCurrentGeneration].next_index++;
	if (index < fGenerations[fCurrentGeneration].entries_size) {
		fGenerations[fCurrentGeneration].entries[index] = entry;
		entry->generation = fCurrentGeneration;
		entry->index = index;
		return;
	}

	// we have to clear the oldest generation
	// (the ring wraps, so the next generation is also the oldest one)
	const int32 newGeneration = (fCurrentGeneration + 1) % fGenerationCount;
	for (int32 i = 0; i < fGenerations[newGeneration].entries_size; i++) {
		EntryCacheEntry* otherEntry = fGenerations[newGeneration].entries[i];
		if (otherEntry == NULL)
			continue;

		// Evict: drop from the slot and the hash table, then free.
		fGenerations[newGeneration].entries[i] = NULL;
		fEntries.Remove(otherEntry);
		free(otherEntry);
	}

	// set the new generation and add the entry
	fCurrentGeneration = newGeneration;
	fGenerations[newGeneration].entries[0] = entry;
	fGenerations[newGeneration].next_index = 1;
	entry->generation = newGeneration;
	entry->index = 0;
}
283