xref: /haiku/src/system/kernel/slab/ObjectCache.cpp (revision 344ded80d400028c8f561b4b876257b94c12db4a)
/*
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "ObjectCache.h"

#include <string.h>

#include <smp.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "MemoryManager.h"
#include "slab_private.h"


RANGE_MARKER_FUNCTION_BEGIN(SlabObjectCache)


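// Callback passed to the object depot: returns an object to its slab, e.g.
// when a magazine is emptied or destroyed. "cookie" is the owning
// ObjectCache.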
static void
object_cache_return_object_wrapper(object_depot* depot, void* cookie,
	void* object, uint32 flags)
{
	ObjectCache* cache = (ObjectCache*)cookie;

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


// #pragma mark -


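// The destructor has nothing to do; the slabs and the depot are torn down
// explicitly by the cache deletion path before the object itself goes away.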
ObjectCache::~ObjectCache()
{
}


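// One-time initialization of the cache: set up the name and lock, round the
// object size up to the requested alignment, reset all counters, and, unless
// CACHE_NO_DEPOT is requested (or implied on single-CPU systems), initialize
// the per-CPU object depot. Callers usually reach this through the public
// create_object_cache*() functions.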
status_t
ObjectCache::Init(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	strlcpy(this->name, name, sizeof(this->name));

	mutex_init(&lock, this->name);

	if (objectSize < sizeof(object_link))
		objectSize = sizeof(object_link);

	if (alignment < kMinObjectAlignment)
		alignment = kMinObjectAlignment;

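	// Round the object size up to a multiple of the alignment. The mask
	// arithmetic below assumes a power-of-two alignment; e.g. an objectSize
	// of 52 with an alignment of 8 yields an object_size of 56.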
	if (alignment > 0 && (objectSize & (alignment - 1)))
		object_size = objectSize + alignment - (objectSize & (alignment - 1));
	else
		object_size = objectSize;

	TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment,
		object_size);

	this->alignment = alignment;
	cache_color_cycle = 0;
	total_objects = 0;
	used_count = 0;
	empty_count = 0;
	pressure = 0;
	min_object_reserve = 0;

	maintenance_pending = false;
	maintenance_in_progress = false;
	maintenance_resize = false;
	maintenance_delete = false;

	usage = 0;
	this->maximum = maximum;

	this->flags = flags;

	resize_request = NULL;
	resize_entry_can_wait = NULL;
	resize_entry_dont_wait = NULL;

	// There is no gain in using the depot in single-CPU setups.
	if (smp_get_num_cpus() == 1)
		this->flags |= CACHE_NO_DEPOT;

	if (!(this->flags & CACHE_NO_DEPOT)) {
		// Determine usable magazine configuration values if none were given.
		if (magazineCapacity == 0) {
			magazineCapacity = objectSize < 256
				? 32 : (objectSize < 512 ? 16 : 8);
		}
		if (maxMagazineCount == 0)
			maxMagazineCount = magazineCapacity / 2;

		status_t status = object_depot_init(&depot, magazineCapacity,
			maxMagazineCount, flags, this, object_cache_return_object_wrapper);
		if (status != B_OK) {
			mutex_destroy(&lock);
			return status;
		}
	}

	this->cookie = cookie;
	this->constructor = constructor;
	this->destructor = destructor;
	this->reclaimer = reclaimer;

	return B_OK;
}


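// Fill in a freshly allocated slab: compute how many objects fit into
// "byteCount" bytes, apply the current cache color as start offset, then run
// the constructor on every object and push it onto the slab's free list. On
// constructor failure all previously constructed objects are destructed
// again and NULL is returned.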
slab*
ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
{
	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
		((uint8*)pages) + byteCount, byteCount);

	slab->pages = pages;
	slab->count = slab->size = byteCount / object_size;
	slab->free = NULL;

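	// Slab coloring: distribute the slab's spare bytes as varying start
	// offsets, so that the objects of successive slabs don't all compete
	// for the same CPU cache lines.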
	size_t spareBytes = byteCount - (slab->size * object_size);

	slab->offset = cache_color_cycle;

	cache_color_cycle += alignment;
	if (cache_color_cycle > spareBytes)
		cache_color_cycle = 0;

	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
		slab->size, spareBytes, slab->offset);

	uint8* data = ((uint8*)pages) + slab->offset;

	CREATE_PARANOIA_CHECK_SET(slab, "slab");

	for (size_t i = 0; i < slab->size; i++) {
		status_t status = B_OK;
		if (constructor)
			status = constructor(cookie, data);

		if (status != B_OK) {
			// The constructor failed: destruct all objects constructed so
			// far and fail the slab initialization.
			data = ((uint8*)pages) + slab->offset;
			for (size_t j = 0; j < i; j++) {
				if (destructor)
					destructor(cookie, data);
				data += object_size;
			}

			DELETE_PARANOIA_CHECK_SET(slab);

			return NULL;
		}

		_push(slab->free, object_to_link(data, object_size));

		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
			&object_to_link(data, object_size)->next, sizeof(void*));

		data += object_size;
	}

	return slab;
}


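// Destruct all objects of a completely unused slab and update the cache's
// usage statistics. The slab's pages themselves are freed by the caller.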
void
ObjectCache::UninitSlab(slab* slab)
{
	TRACE_CACHE(this, "destruct %p", slab);

	if (slab->count != slab->size)
		panic("cache: destroying a slab which isn't empty.");

	usage -= slab_size;
	total_objects -= slab->size;

	DELETE_PARANOIA_CHECK_SET(slab);

	uint8* data = ((uint8*)slab->pages) + slab->offset;

	for (size_t i = 0; i < slab->size; i++) {
		if (destructor)
			destructor(cookie, data);
		data += object_size;
	}
}


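// Put "object" back onto the free list of its slab and move the slab between
// the full, partial, and empty lists as appropriate. A slab that just became
// completely free is either kept around or returned to the memory manager,
// depending on the current demand.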
void
ObjectCache::ReturnObjectToSlab(slab* source, void* object, uint32 flags)
{
	if (source == NULL) {
		panic("object_cache: free'd object %p has no slab", object);
		return;
	}

	ParanoiaChecker _(source);

#if KDEBUG >= 1
	// Verify that the pointer lies within the slab's object area and is
	// aligned to an object boundary.
	uint8* objectsStart = (uint8*)source->pages + source->offset;
	if (object < objectsStart
		|| object >= objectsStart + source->size * object_size
		|| ((uint8*)object - objectsStart) % object_size != 0) {
		panic("object_cache: tried to free invalid object pointer %p", object);
		return;
	}
#endif // KDEBUG

	object_link* link = object_to_link(object, object_size);

	TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
		object, link, source, source->size - source->count,
		empty_count);

	_push(source->free, link);
	source->count++;
	used_count--;

	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));

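	// The slab is completely unused now. Keep it cached if there is enough
	// demand pressure or if its objects are needed to maintain the minimum
	// object reserve; otherwise return it to the memory manager.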
	if (source->count == source->size) {
		partial.Remove(source);

		if (empty_count < pressure
				|| (total_objects - (used_count + source->size))
					< min_object_reserve) {
			empty_count++;
			empty.Add(source);
		} else {
			ReturnSlab(source, flags);
		}
	} else if (source->count == 1) {
		// The slab was full before; move it back to the partial list.
		full.Remove(source);
		partial.Add(source);
	}
}


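// Return the address of the object with the given index within "source".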
void*
ObjectCache::ObjectAtIndex(slab* source, int32 index) const
{
	return (uint8*)source->pages + source->offset + index * object_size;
}


#if PARANOID_KERNEL_FREE

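// Check that "object" looks like a live allocation of this cache: its slab
// must be on the partial or full list, and the object must not already be on
// the slab's free list (which would indicate a double free).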
bool
ObjectCache::AssertObjectNotFreed(void* object)
{
	MutexLocker locker(lock);

	slab* source = ObjectSlab(object);
	if (!partial.Contains(source) && !full.Contains(source)) {
		panic("object_cache: to be freed object %p: slab not part of cache!",
			object);
		return false;
	}

	object_link* link = object_to_link(object, object_size);
	for (object_link* freeLink = source->free; freeLink != NULL;
			freeLink = freeLink->next) {
		if (freeLink == link) {
			panic("object_cache: double free of %p (slab %p, cache %p)",
				object, source, this);
			return false;
		}
	}

	return true;
}

#endif // PARANOID_KERNEL_FREE


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

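// Allocate and clear one AllocationTrackingInfo per object that fits into
// the slab. The array is allocated raw from the MemoryManager rather than
// through an object cache.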
status_t
ObjectCache::AllocateTrackingInfos(slab* slab, size_t byteCount, uint32 flags)
{
	void* pages;
	size_t objectCount = byteCount / object_size;
	status_t result = MemoryManager::AllocateRaw(
		objectCount * sizeof(AllocationTrackingInfo), flags, pages);
	if (result == B_OK) {
		slab->tracking = (AllocationTrackingInfo*)pages;
		for (size_t i = 0; i < objectCount; i++)
			slab->tracking[i].Clear();
	}

	return result;
}


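// Release the slab's tracking info array again.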
void
ObjectCache::FreeTrackingInfos(slab* slab, uint32 flags)
{
	MemoryManager::FreeRawOrReturnCache(slab->tracking, flags);
}


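// Map an object pointer back to its tracking slot: the object's index within
// its slab (taking the coloring offset into account) indexes the tracking
// array.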
AllocationTrackingInfo*
ObjectCache::TrackingInfoFor(void* object) const
{
	slab* objectSlab = ObjectSlab(object);
	return &objectSlab->tracking[((addr_t)object - objectSlab->offset
		- (addr_t)objectSlab->pages) / object_size];
}

#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


RANGE_MARKER_FUNCTION_END(SlabObjectCache)