xref: /haiku/src/add-ons/kernel/file_systems/ramfs/DataContainer.cpp (revision eea5774f46bba925156498abf9cb1a1165647bf7)
/*
 * Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2019-2024, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT license.
 */
#include "DataContainer.h"

#include <StackOrHeapArray.h>
#include <util/AutoLock.h>
#include <util/BitUtils.h>

#include <vm/VMCache.h>
#include <vm/vm_page.h>

#include "AllocationInfo.h"
#include "DebugSupport.h"
#include "Misc.h"
#include "Volume.h"
#include "cache_support.h"


// Initial size of the DataContainer's small buffer. If it contains data up to
// this size, nothing is allocated, but the small buffer is used instead.
// 16 bytes come for free, since they are shared with the block list (actually
// even more, since the list has an initial size).
// I ran a test analyzing what sizes the attributes on my system have:
//     size   percentage   bytes used on average
//   <=   0         0.00                   93.45
//   <=   4        25.46                   75.48
//   <=   8        30.54                   73.02
//   <=  16        52.98                   60.37
//   <=  32        80.19                   51.74
//   <=  64        94.38                   70.54
//   <= 126        96.90                  128.23
//
// For the average memory usage it is assumed that attributes larger than 126
// bytes have size 127, that the list has an initial capacity of 10 entries
// (40 bytes), and that the block reference consumes 4 bytes and the block
// header 12 bytes. The optimal length is actually 35, with 51.05 bytes per
// attribute, but I conservatively rounded to 32.
static const off_t kMinimumSmallBufferSize = 32;
static const off_t kMaximumSmallBufferSize = (B_PAGE_SIZE / 4);
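// With the usual 4 KiB page size this means that data up to 1 KiB lives in a
// malloc()ed buffer, while anything larger (or anything that once grew larger,
// see _RequiresCacheMode()) is backed by a VMCache.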


DataContainer::DataContainer(Volume *volume)
	: fVolume(volume),
	  fSize(0),
	  fCache(NULL),
	  fSmallBuffer(NULL),
	  fSmallBufferSize(0)
{
}


DataContainer::~DataContainer()
{
	if (fCache != NULL) {
		fCache->Lock();
		fCache->ReleaseRefAndUnlock();
		fCache = NULL;
	}
	if (fSmallBuffer != NULL) {
		free(fSmallBuffer);
		fSmallBuffer = NULL;
	}
}


status_t
DataContainer::InitCheck() const
{
	return (fVolume != NULL ? B_OK : B_ERROR);
}


VMCache*
DataContainer::GetCache()
{
	// TODO: Because GetCache() is called for files at creation time rather
	// than on demand, files (no matter how small) always use cache mode at
	// present.
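	// Note that once a cache exists the container never goes back to the
	// small buffer (see _RequiresCacheMode()).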
	if (!_IsCacheMode())
		_SwitchToCacheMode();
	return fCache;
}


status_t
DataContainer::Resize(off_t newSize)
{
//	PRINT("DataContainer::Resize(%lld), fSize: %lld\n", newSize, fSize);

	status_t error = B_OK;
	if (_RequiresCacheMode(newSize)) {
		if (newSize < fSize) {
			// shrink
			// resize the VMCache, which will automatically free pages
			AutoLocker<VMCache> _(fCache);
			error = fCache->Resize(newSize, VM_PRIORITY_USER);
			if (error != B_OK)
				return error;
		} else {
			// grow
			if (!_IsCacheMode())
				error = _SwitchToCacheMode();
			if (error != B_OK)
				return error;

			AutoLocker<VMCache> _(fCache);
			fCache->Resize(newSize, VM_PRIORITY_USER);

			// pages will be added as they are written to, so nothing else
			// needs to be done here
		}
	} else if (fSmallBufferSize < newSize
			|| (fSmallBufferSize - newSize) > (kMaximumSmallBufferSize / 2)) {
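		// Small buffer mode: reallocate if the buffer is too small for the
		// new size, or if shrinking would leave more than half of the maximum
		// small buffer size unused. The new capacity is newSize rounded up to
		// a power of two, but at least kMinimumSmallBufferSize; a newSize of
		// 100 bytes, for example, ends up with a 128-byte buffer.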
		const size_t newBufferSize = max_c(next_power_of_2(newSize),
			kMinimumSmallBufferSize);
		void* newBuffer = realloc(fSmallBuffer, newBufferSize);
		if (newBuffer == NULL)
			return B_NO_MEMORY;

		fSmallBufferSize = newBufferSize;
		fSmallBuffer = (uint8*)newBuffer;
	}

	fSize = newSize;

//	PRINT("DataContainer::Resize() done: %lx, fSize: %lld\n", error, fSize);
	return error;
}


status_t
DataContainer::ReadAt(off_t offset, void *_buffer, size_t size,
	size_t *bytesRead)
{
	uint8 *buffer = (uint8*)_buffer;
	status_t error = (buffer && offset >= 0 && bytesRead
		? B_OK : B_BAD_VALUE);
	if (error != B_OK)
		return error;

	// don't read more than we have to offer
	offset = min(offset, fSize);
	size = min(size, size_t(fSize - offset));
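	// E.g. with fSize == 10, a request for 20 bytes at offset 4 is clamped to
	// 6 bytes, and a request entirely beyond the end reads 0 bytes.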

	if (!_IsCacheMode()) {
		// in non-cache mode, use the "small buffer"
		if (IS_USER_ADDRESS(buffer)) {
			error = user_memcpy(buffer, fSmallBuffer + offset, size);
			if (error != B_OK)
				size = 0;
		} else {
			memcpy(buffer, fSmallBuffer + offset, size);
		}

		if (bytesRead != NULL)
			*bytesRead = size;
		return error;
	}

	// cache mode
	error = _DoCacheIO(offset, buffer, size, bytesRead, false);

	return error;
}


status_t
DataContainer::WriteAt(off_t offset, const void *_buffer, size_t size,
	size_t *bytesWritten)
{
	PRINT("DataContainer::WriteAt(%lld, %p, %lu, %p), fSize: %lld\n", offset,
		_buffer, size, bytesWritten, fSize);

	const uint8 *buffer = (const uint8*)_buffer;
	status_t error = (buffer && offset >= 0 && bytesWritten
		? B_OK : B_BAD_VALUE);
	if (error != B_OK)
		return error;

	// resize the container, if necessary
	if ((offset + (off_t)size) > fSize)
		error = Resize(offset + size);
	if (error != B_OK)
		return error;
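	// from here on the container is large enough to hold the entire write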

	if (!_IsCacheMode()) {
		// in non-cache mode, use the "small buffer"
		if (IS_USER_ADDRESS(buffer)) {
			error = user_memcpy(fSmallBuffer + offset, buffer, size);
			if (error != B_OK)
				size = 0;
		} else {
			memcpy(fSmallBuffer + offset, buffer, size);
		}

		if (bytesWritten != NULL)
			*bytesWritten = size;
		return error;
	}

	// cache mode
	error = _DoCacheIO(offset, (uint8*)buffer, size, bytesWritten, true);

	PRINT("DataContainer::WriteAt() done: %lx, fSize: %lld\n", error, fSize);
	return error;
}


void
DataContainer::GetAllocationInfo(AllocationInfo &info)
{
	if (_IsCacheMode()) {
		info.AddAreaAllocation(fCache->committed_size);
	} else {
		// ...
	}
}


inline bool
DataContainer::_RequiresCacheMode(size_t size)
{
	// we cannot back out of cache mode after entering it,
	// as there may be other consumers of our VMCache
	return _IsCacheMode() || (size > kMaximumSmallBufferSize);
}


inline bool
DataContainer::_IsCacheMode() const
{
	return fCache != NULL;
}


inline int32
DataContainer::_CountBlocks() const
{
	if (_IsCacheMode())
		return fCache->page_count;
	else if (fSize == 0)	// small buffer mode, empty buffer
		return 0;
	return 1;	// small buffer mode, non-empty buffer
}


status_t
DataContainer::_SwitchToCacheMode()
{
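	// Create an anonymous VMCache that takes over the data storage; from now
	// on all I/O goes through _DoCacheIO().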
	status_t error = VMCacheFactory::CreateAnonymousCache(fCache, false, 0,
		0, false, VM_PRIORITY_USER);
	if (error != B_OK)
		return error;

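	// The cache itself is the backing store for the data: mark it temporary
	// and publish the current size as its virtual end.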
	fCache->temporary = 1;
	fCache->virtual_end = fSize;

	error = fCache->Commit(fSize, VM_PRIORITY_USER);
	if (error != B_OK)
		return error;

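	// Copy whatever the small buffer holds into the cache and release the
	// buffer; the container stays in cache mode from now on.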
	if (fSize != 0)
		error = _DoCacheIO(0, fSmallBuffer, fSize, NULL, true);

	free(fSmallBuffer);
	fSmallBuffer = NULL;
	fSmallBufferSize = 0;

	return error;
}


status_t
DataContainer::_DoCacheIO(const off_t offset, uint8* buffer, ssize_t length,
	size_t* bytesProcessed, bool isWrite)
{
	const size_t originalLength = length;
	const bool user = IS_USER_ADDRESS(buffer);

	const off_t rounded_offset = ROUNDDOWN(offset, B_PAGE_SIZE);
	const size_t rounded_len = ROUNDUP((length) + (offset - rounded_offset),
		B_PAGE_SIZE);
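	// The transfer is done on whole pages: with 4 KiB pages, for example, a
	// 3000-byte transfer at offset 4000 touches the two pages covering
	// [0, 8192).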
	BStackOrHeapArray<vm_page*, 16> pages(rounded_len / B_PAGE_SIZE);
	if (!pages.IsValid())
		return B_NO_MEMORY;

	cache_get_pages(fCache, rounded_offset, rounded_len, isWrite, pages);

	status_t error = B_OK;
	size_t index = 0;

	while (length > 0) {
		vm_page* page = pages[index];
		phys_addr_t at = (page != NULL)
			? (page->physical_page_number * B_PAGE_SIZE) : 0;
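		// A NULL page means the cache has no page for this part of the range
		// (it was never written to); reads treat it as zeros below.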
		ssize_t bytes = B_PAGE_SIZE;
		if (index == 0) {
			const uint32 pageoffset = (offset % B_PAGE_SIZE);
			at += pageoffset;
			bytes -= pageoffset;
		}
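		// only the first page can start mid-page; all following pages are
		// transferred from their beginning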
		bytes = min(length, bytes);

		if (isWrite) {
			page->modified = true;
			error = vm_memcpy_to_physical(at, buffer, bytes, user);
		} else {
			if (page != NULL) {
				error = vm_memcpy_from_physical(buffer, at, bytes, user);
			} else {
				if (user) {
					error = user_memset(buffer, 0, bytes);
				} else {
					memset(buffer, 0, bytes);
				}
			}
		}
		if (error != B_OK)
			break;

		buffer += bytes;
		length -= bytes;
		index++;
	}

	cache_put_pages(fCache, rounded_offset, rounded_len, pages, error == B_OK);

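	// report how much was actually transferred before any error occurred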
	if (bytesProcessed != NULL)
		*bytesProcessed = length > 0 ? originalLength - length : originalLength;

	return error;
}