/*
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "ObjectCache.h"

#include <string.h>

#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "MemoryManager.h"
#include "slab_private.h"


RANGE_MARKER_FUNCTION_BEGIN(SlabObjectCache)


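// Depot callback used to return an object to its source cache. The depot
// passes the owning ObjectCache as the cookie; the object is handed back to
// its slab while the cache lock is held.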
static void
object_cache_return_object_wrapper(object_depot* depot, void* cookie,
	void* object, uint32 flags)
{
	ObjectCache* cache = (ObjectCache*)cookie;

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


// #pragma mark -


ObjectCache::~ObjectCache()
{
}


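// Common second-stage initialization for all object caches: rounds the
// object size up to the link size and alignment, resets the bookkeeping
// fields, and, unless CACHE_NO_DEPOT is (or gets) set, initializes the
// depot with the given or derived magazine configuration.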
status_t
ObjectCache::Init(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	strlcpy(this->name, name, sizeof(this->name));

	mutex_init(&lock, this->name);

	if (objectSize < sizeof(object_link))
		objectSize = sizeof(object_link);

	if (alignment < kMinObjectAlignment)
		alignment = kMinObjectAlignment;

	if (alignment > 0 && (objectSize & (alignment - 1)))
		object_size = objectSize + alignment - (objectSize & (alignment - 1));
	else
		object_size = objectSize;

	TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment,
		object_size);

	this->alignment = alignment;
	cache_color_cycle = 0;
	total_objects = 0;
	used_count = 0;
	empty_count = 0;
	pressure = 0;
	min_object_reserve = 0;

	maintenance_pending = false;
	maintenance_in_progress = false;
	maintenance_resize = false;
	maintenance_delete = false;

	usage = 0;
	this->maximum = maximum;

	this->flags = flags;

	resize_request = NULL;
	resize_entry_can_wait = NULL;
	resize_entry_dont_wait = NULL;

	// There is no gain in using the depot in single-CPU setups.
	if (smp_get_num_cpus() == 1)
		this->flags |= CACHE_NO_DEPOT;

	if (!(this->flags & CACHE_NO_DEPOT)) {
		// Determine usable magazine configuration values if none were given.
		if (magazineCapacity == 0) {
			magazineCapacity = objectSize < 256
				? 32 : (objectSize < 512 ? 16 : 8);
		}
		if (maxMagazineCount == 0)
			maxMagazineCount = magazineCapacity / 2;

		status_t status = object_depot_init(&depot, magazineCapacity,
			maxMagazineCount, flags, this, object_cache_return_object_wrapper);
		if (status != B_OK) {
			mutex_destroy(&lock);
			return status;
		}
	}

	this->cookie = cookie;
	this->constructor = constructor;
	this->destructor = destructor;
	this->reclaimer = reclaimer;

	return B_OK;
}


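// Turns a freshly allocated range of pages into a slab: computes how many
// objects fit, applies the current cache coloring offset, runs the
// constructor on every object and pushes the objects onto the slab's free
// list. Returns NULL if a constructor fails; already constructed objects
// are destructed again in that case.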
slab*
ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
{
	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
		((uint8*)pages) + byteCount, byteCount);

	slab->pages = pages;
	slab->count = slab->size = byteCount / object_size;
	slab->free = NULL;

	size_t spareBytes = byteCount - (slab->size * object_size);

	slab->offset = cache_color_cycle;

	cache_color_cycle += alignment;
	if (cache_color_cycle > spareBytes)
		cache_color_cycle = 0;

	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
		slab->size, spareBytes, slab->offset);

	uint8* data = ((uint8*)pages) + slab->offset;

	CREATE_PARANOIA_CHECK_SET(slab, "slab");

	for (size_t i = 0; i < slab->size; i++) {
		status_t status = B_OK;
		if (constructor)
			status = constructor(cookie, data);

		if (status != B_OK) {
			// Construction failed: destruct the objects constructed so far
			// and give up on the slab.
			data = ((uint8*)pages) + slab->offset;
			for (size_t j = 0; j < i; j++) {
				if (destructor)
					destructor(cookie, data);
				data += object_size;
			}

			DELETE_PARANOIA_CHECK_SET(slab);

			return NULL;
		}

		_push(slab->free, object_to_link(data, object_size));

		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
			&object_to_link(data, object_size)->next, sizeof(void*));

		data += object_size;
	}

	return slab;
}


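// Destroys a slab before its pages are returned to the memory manager. The
// slab must be completely unused; the destructor is run on every object.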
void
ObjectCache::UninitSlab(slab* slab)
{
	TRACE_CACHE(this, "destruct %p", slab);

	if (slab->count != slab->size)
		panic("cache: destroying a slab which isn't empty.");

	usage -= slab_size;
	total_objects -= slab->size;

	DELETE_PARANOIA_CHECK_SET(slab);

	uint8* data = ((uint8*)slab->pages) + slab->offset;

	for (size_t i = 0; i < slab->size; i++) {
		if (destructor)
			destructor(cookie, data);
		data += object_size;
	}
}


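// Returns an object to its slab's free list and moves the slab between the
// full, partial and empty lists accordingly. A slab that becomes completely
// free is kept on the empty list only while the empty count stays below the
// current pressure and the object reserve remains satisfied without it;
// otherwise it is returned to the memory manager. The caller must hold the
// cache lock.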
void
ObjectCache::ReturnObjectToSlab(slab* source, void* object, uint32 flags)
{
	if (source == NULL) {
		panic("object_cache: free'd object %p has no slab", object);
		return;
	}

	ParanoiaChecker _(source);

#if KDEBUG >= 1
	uint8* objectsStart = (uint8*)source->pages + source->offset;
	if (object < objectsStart
		|| object >= objectsStart + source->size * object_size
		|| ((uint8*)object - objectsStart) % object_size != 0) {
		panic("object_cache: tried to free invalid object pointer %p", object);
		return;
	}
#endif // KDEBUG

	object_link* link = object_to_link(object, object_size);

	TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
		object, link, source, source->size - source->count,
		empty_count);

	_push(source->free, link);
	source->count++;
	used_count--;

	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));

	if (source->count == source->size) {
		partial.Remove(source);

		if (empty_count < pressure
			&& total_objects - used_count - source->size
				>= min_object_reserve) {
			empty_count++;
			empty.Add(source);
		} else {
			ReturnSlab(source, flags);
		}
	} else if (source->count == 1) {
		full.Remove(source);
		partial.Add(source);
	}
}


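// Computes the address of the object with the given index in the slab,
// taking the slab's coloring offset into account.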
void*
ObjectCache::ObjectAtIndex(slab* source, int32 index) const
{
	return (uint8*)source->pages + source->offset + index * object_size;
}


#if PARANOID_KERNEL_FREE

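// Paranoid-free sanity check: panics if the object's slab is not on the
// partial or full list, or if the object is already on the slab's free
// list (a double free). Returns false in either case.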
bool
ObjectCache::AssertObjectNotFreed(void* object)
{
	MutexLocker locker(lock);

	slab* source = ObjectSlab(object);
	if (!partial.Contains(source) && !full.Contains(source)) {
		panic("object_cache: to be freed object %p: slab not part of cache!",
			object);
		return false;
	}

	object_link* link = object_to_link(object, object_size);
	for (object_link* freeLink = source->free; freeLink != NULL;
			freeLink = freeLink->next) {
		if (freeLink == link) {
			panic("object_cache: double free of %p (slab %p, cache %p)",
				object, source, this);
			return false;
		}
	}

	return true;
}

#endif // PARANOID_KERNEL_FREE


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

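// Allocates and clears one AllocationTrackingInfo per object in the slab,
// backed by a raw allocation from the MemoryManager.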
status_t
ObjectCache::AllocateTrackingInfos(slab* slab, size_t byteCount, uint32 flags)
{
	void* pages;
	size_t objectCount = byteCount / object_size;
	status_t result = MemoryManager::AllocateRaw(
		objectCount * sizeof(AllocationTrackingInfo), flags, pages);
	if (result == B_OK) {
		slab->tracking = (AllocationTrackingInfo*)pages;
		for (size_t i = 0; i < objectCount; i++)
			slab->tracking[i].Clear();
	}

	return result;
}


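// Releases the tracking array allocated by AllocateTrackingInfos().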
void
ObjectCache::FreeTrackingInfos(slab* slab, uint32 flags)
{
	MemoryManager::FreeRawOrReturnCache(slab->tracking, flags);
}


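// Maps an object address back to its entry in the owning slab's tracking
// array.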
AllocationTrackingInfo*
ObjectCache::TrackingInfoFor(void* object) const
{
	slab* objectSlab = ObjectSlab(object);
	return &objectSlab->tracking[((addr_t)object - objectSlab->offset
		- (addr_t)objectSlab->pages) / object_size];
}

#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


RANGE_MARKER_FUNCTION_END(SlabObjectCache)