xref: /haiku/src/system/kernel/slab/ObjectCache.cpp (revision 675ffabd70492a962f8c0288a32208c22ce5de18)
/*
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "ObjectCache.h"

#include <string.h>

#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "slab_private.h"


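// Callback passed to object_depot_init() below: when the depot has to get
// rid of cached objects (e.g. when magazines are flushed), it hands them
// back to their slab through this wrapper, under the cache lock.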
static void
object_cache_return_object_wrapper(object_depot* depot, void* cookie,
	void* object, uint32 flags)
{
	ObjectCache* cache = (ObjectCache*)cookie;

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


// #pragma mark -


ObjectCache::~ObjectCache()
{
}


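// Sets up the cache: rounds the object size up to the requested alignment,
// resets all bookkeeping, and initializes the object depot with either the
// caller-supplied or a size-based default magazine geometry, unless this is
// a single-CPU system or CACHE_NO_DEPOT was requested.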
status_t
ObjectCache::Init(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	strlcpy(this->name, name, sizeof(this->name));

	mutex_init(&lock, this->name);

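	// A free object stores its free list link (an object_link) within its
	// own memory, so objects can never be smaller than that link.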
	if (objectSize < sizeof(object_link))
		objectSize = sizeof(object_link);

	if (alignment < kMinObjectAlignment)
		alignment = kMinObjectAlignment;

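	// Round the object size up to the next multiple of the alignment; the
	// bit mask below relies on the alignment being a power of two.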
	if (alignment > 0 && (objectSize & (alignment - 1)))
		object_size = objectSize + alignment - (objectSize & (alignment - 1));
	else
		object_size = objectSize;

	TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment,
		object_size);

	this->alignment = alignment;
	cache_color_cycle = 0;
	total_objects = 0;
	used_count = 0;
	empty_count = 0;
	pressure = 0;
	min_object_reserve = 0;

	maintenance_pending = false;
	maintenance_in_progress = false;
	maintenance_resize = false;
	maintenance_delete = false;

	usage = 0;
	this->maximum = maximum;

	this->flags = flags;

	resize_request = NULL;
	resize_entry_can_wait = NULL;
	resize_entry_dont_wait = NULL;

	// no gain in using the depot in single cpu setups
	if (smp_get_num_cpus() == 1)
		this->flags |= CACHE_NO_DEPOT;

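	// The depot caches freed objects in magazines in front of the slab
	// lists (which is why it is skipped on single-CPU systems above).
	// Whatever it evicts comes back via object_cache_return_object_wrapper().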
	if (!(this->flags & CACHE_NO_DEPOT)) {
		// Determine usable magazine configuration values if none had been given
		if (magazineCapacity == 0) {
			magazineCapacity = objectSize < 256
				? 32 : (objectSize < 512 ? 16 : 8);
		}
		if (maxMagazineCount == 0)
			maxMagazineCount = magazineCapacity / 2;

		status_t status = object_depot_init(&depot, magazineCapacity,
			maxMagazineCount, flags, this, object_cache_return_object_wrapper);
		if (status != B_OK) {
			mutex_destroy(&lock);
			return status;
		}
	}

	this->cookie = cookie;
	this->constructor = constructor;
	this->destructor = destructor;
	this->reclaimer = reclaimer;

	return B_OK;
}


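// Carves the given page range into a slab: computes how many objects fit,
// applies the current cache color offset, constructs every object and pushes
// it onto the slab's free list. Returns NULL if a constructor fails, after
// destructing the objects that were already constructed.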
slab*
ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
{
	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
		((uint8*)pages) + byteCount, byteCount);

	slab->pages = pages;
	slab->count = slab->size = byteCount / object_size;
	slab->free = NULL;

	size_t spareBytes = byteCount - (slab->size * object_size);

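	// Cache coloring: consecutive slabs start their objects at slightly
	// different offsets (stepped by the alignment, within the spare bytes)
	// so that objects of different slabs are less likely to compete for the
	// same CPU cache lines.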
	slab->offset = cache_color_cycle;

	cache_color_cycle += alignment;
	if (cache_color_cycle > spareBytes)
		cache_color_cycle = 0;

	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
		slab->size, spareBytes, slab->offset);

	uint8* data = ((uint8*)pages) + slab->offset;

	CREATE_PARANOIA_CHECK_SET(slab, "slab");

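	// Construct each object and push it onto the slab's free list. If a
	// constructor fails, destruct the objects built so far and give up.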
	for (size_t i = 0; i < slab->size; i++) {
		status_t status = B_OK;
		if (constructor)
			status = constructor(cookie, data);

		if (status != B_OK) {
			data = ((uint8*)pages) + slab->offset;
			for (size_t j = 0; j < i; j++) {
				if (destructor)
					destructor(cookie, data);
				data += object_size;
			}

			DELETE_PARANOIA_CHECK_SET(slab);

			return NULL;
		}

		_push(slab->free, object_to_link(data, object_size));

		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
			&object_to_link(data, object_size)->next, sizeof(void*));

		data += object_size;
	}

	return slab;
}


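// Tears a slab down again. All of its objects must be free at this point;
// adjusts the usage and object counters and runs the destructor on every
// object.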
void
ObjectCache::UninitSlab(slab* slab)
{
	TRACE_CACHE(this, "destruct %p", slab);

	if (slab->count != slab->size)
		panic("cache: destroying a slab which isn't empty.");

	usage -= slab_size;
	total_objects -= slab->size;

	DELETE_PARANOIA_CHECK_SET(slab);

	uint8* data = ((uint8*)slab->pages) + slab->offset;

	for (size_t i = 0; i < slab->size; i++) {
		if (destructor)
			destructor(cookie, data);
		data += object_size;
	}
}


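// Returns an object to the slab it was allocated from, with the cache lock
// held. Pushes it back onto the slab's free list, updates the counters and
// moves the slab between the full, partial and empty lists as needed; a slab
// that just became completely free is either kept around as an empty slab or
// given back via ReturnSlab().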
void
ObjectCache::ReturnObjectToSlab(slab* source, void* object, uint32 flags)
{
	if (source == NULL) {
		panic("object_cache: free'd object has no slab");
		return;
	}

	ParanoiaChecker _(source);

#if KDEBUG >= 1
	uint8* objectsStart = (uint8*)source->pages + source->offset;
	if (object < objectsStart
		|| object >= objectsStart + source->size * object_size
		|| ((uint8*)object - objectsStart) % object_size != 0) {
		panic("object_cache: tried to free invalid object pointer");
		return;
	}
#endif // KDEBUG

	object_link* link = object_to_link(object, object_size);

	TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
		object, link, source, source->size - source->count,
		empty_count);

	_push(source->free, link);
	source->count++;
	used_count--;

	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));

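	// If the slab is now completely free, keep it on the empty list only
	// while the number of empty slabs is below the current pressure and the
	// free objects in the remaining slabs still cover min_object_reserve;
	// otherwise return it. A slab that was full until this free moves from
	// the full to the partial list.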
	if (source->count == source->size) {
		partial.Remove(source);

		if (empty_count < pressure
			&& total_objects - used_count - source->size
				>= min_object_reserve) {
			empty_count++;
			empty.Add(source);
		} else {
			ReturnSlab(source, flags);
		}
	} else if (source->count == 1) {
		full.Remove(source);
		partial.Add(source);
	}
}
241