xref: /haiku/src/add-ons/kernel/file_systems/packagefs/package/CachedDataReader.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
/*
 * Copyright 2010-2014, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "CachedDataReader.h"

#include <algorithm>

#include <DataIO.h>

#include <util/AutoLock.h>
#include <vm/VMCache.h>
#include <vm/vm_page.h>

#include "DebugSupport.h"


using BPackageKit::BHPKG::BBufferDataReader;


static inline bool
page_physical_number_less(const vm_page* a, const vm_page* b)
{
	return a->physical_page_number < b->physical_page_number;
}


// #pragma mark - PagesDataOutput


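// A small BDataIO implementation that writes incoming data directly into a
// run of physical pages, advancing from page to page as data arrives. It is
// used by _ReadIntoPages() to let the wrapped reader fill cache pages.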
struct CachedDataReader::PagesDataOutput : public BDataIO {
	PagesDataOutput(vm_page** pages, size_t pageCount)
		:
		fPages(pages),
		fPageCount(pageCount),
		fInPageOffset(0)
	{
	}

	virtual ssize_t Write(const void* buffer, size_t size)
	{
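		// Copy the data one physical page at a time, honoring the current
		// in-page offset and advancing to the next page once one is full.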
		size_t bytesRemaining = size;
		while (bytesRemaining > 0) {
			if (fPageCount == 0)
				return B_BAD_VALUE;

			size_t toCopy = std::min(bytesRemaining,
				B_PAGE_SIZE - fInPageOffset);
			status_t error = vm_memcpy_to_physical(
				fPages[0]->physical_page_number * B_PAGE_SIZE + fInPageOffset,
				buffer, toCopy, false);
			if (error != B_OK)
				return error;

			fInPageOffset += toCopy;
			if (fInPageOffset == B_PAGE_SIZE) {
				fInPageOffset = 0;
				fPages++;
				fPageCount--;
			}

			buffer = (const char*)buffer + toCopy;
			bytesRemaining -= toCopy;
		}

		return size;
	}

private:
	vm_page**	fPages;
	size_t		fPageCount;
	size_t		fInPageOffset;
};


// #pragma mark - CachedDataReader


CachedDataReader::CachedDataReader()
	:
	fReader(NULL),
	fCache(NULL),
	fCacheLineLockers()
{
	mutex_init(&fLock, "packagefs cached reader");
}


CachedDataReader::~CachedDataReader()
{
	if (fCache != NULL) {
		fCache->Lock();
		fCache->ReleaseRefAndUnlock();
	}

	mutex_destroy(&fLock);
}


status_t
CachedDataReader::Init(BAbstractBufferedDataReader* reader, off_t size)
{
	fReader = reader;

	status_t error = fCacheLineLockers.Init();
	if (error != B_OK)
		RETURN_ERROR(error);

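	// The cache is used only as a container for the cached pages; the actual
	// data is read in through fReader.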
	error = VMCacheFactory::CreateNullCache(VM_PRIORITY_SYSTEM,
		fCache);
	if (error != B_OK)
		RETURN_ERROR(error);

	AutoLocker<VMCache> locker(fCache);

	error = fCache->Resize(size, VM_PRIORITY_SYSTEM);
	if (error != B_OK)
		RETURN_ERROR(error);

	return B_OK;
}


status_t
CachedDataReader::ReadDataToOutput(off_t offset, size_t size,
	BDataIO* output)
{
	if (offset > fCache->virtual_end
		|| (off_t)size > fCache->virtual_end - offset) {
		return B_BAD_VALUE;
	}

	if (size == 0)
		return B_OK;

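	// Process the request one cache line at a time. Each iteration handles
	// the intersection of the request with the current cache line.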
	while (size > 0) {
		// the start of the current cache line
		off_t lineOffset = (offset / kCacheLineSize) * kCacheLineSize;

		// intersection of request and cache line
		off_t cacheLineEnd = std::min(lineOffset + (off_t)kCacheLineSize,
			fCache->virtual_end);
		size_t requestLineLength
			= std::min(cacheLineEnd - offset, (off_t)size);

		// transfer the data of the cache line
		status_t error = _ReadCacheLine(lineOffset, cacheLineEnd - lineOffset,
			offset, requestLineLength, output);
		if (error != B_OK)
			return error;

		offset = cacheLineEnd;
		size -= requestLineLength;
	}

	return B_OK;
}


status_t
CachedDataReader::_ReadCacheLine(off_t lineOffset, size_t lineSize,
	off_t requestOffset, size_t requestLength, BDataIO* output)
{
	PRINT("CachedDataReader::_ReadCacheLine(%" B_PRIdOFF ", %zu, %" B_PRIdOFF
		", %zu, %p)\n", lineOffset, lineSize, requestOffset, requestLength,
		output);

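	// Serialize access to this cache line: while one request populates it,
	// concurrent requests for the same line wait in _LockCacheLine().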
	CacheLineLocker cacheLineLocker(this, lineOffset);

	// check which pages of the cache line are already in the cache and mark
	// them unused
	page_num_t firstPageOffset = lineOffset / B_PAGE_SIZE;
	page_num_t linePageCount = (lineSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
	vm_page* pages[kPagesPerCacheLine] = {};

	AutoLocker<VMCache> cacheLocker(fCache);

	page_num_t firstMissing = 0;
	page_num_t lastMissing = 0;
	page_num_t missingPages = 0;
	page_num_t pageOffset = firstPageOffset;

	VMCachePagesTree::Iterator it = fCache->pages.GetIterator(pageOffset, true,
		true);
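	// Walk the cache line: pages already in the cache are recorded in the
	// pages array; gaps extend the [firstMissing, lastMissing] range.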
	while (pageOffset < firstPageOffset + linePageCount) {
		vm_page* page = it.Next();
		page_num_t currentPageOffset;
		if (page == NULL
			|| page->cache_offset >= firstPageOffset + linePageCount) {
			page = NULL;
			currentPageOffset = firstPageOffset + linePageCount;
		} else
			currentPageOffset = page->cache_offset;

		if (pageOffset < currentPageOffset) {
			// pages are missing
			if (missingPages == 0)
				firstMissing = pageOffset;
			lastMissing = currentPageOffset - 1;
			missingPages += currentPageOffset - pageOffset;

			for (; pageOffset < currentPageOffset; pageOffset++)
				pages[pageOffset - firstPageOffset] = NULL;
		}

		if (page != NULL) {
			pages[pageOffset++ - firstPageOffset] = page;
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_UNUSED);
			DEBUG_PAGE_ACCESS_END(page);
		}
	}

	cacheLocker.Unlock();

	if (missingPages > 0) {
// TODO: If the missing pages range doesn't intersect with the request, just
// satisfy the request and don't read anything at all.
		// Some pages of the cache line are missing. We have to allocate fresh
		// ones.

		// reserve memory for the missing pages; on failure bypass the cache
		vm_page_reservation reservation;
		if (!vm_page_try_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM)) {
			_DiscardPages(pages, firstMissing - firstPageOffset, missingPages);

			// fall back to uncached transfer
			return fReader->ReadDataToOutput(requestOffset, requestLength,
				output);
		}

		// Allocate the missing pages and remove the already existing pages in
		// the range from the cache. We're going to read/write the whole range
		// anyway.
		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			if (pages[index] == NULL) {
				pages[index] = vm_page_allocate_page(&reservation,
					PAGE_STATE_UNUSED);
				DEBUG_PAGE_ACCESS_END(pages[index]);
			} else {
				cacheLocker.Lock();
				fCache->RemovePage(pages[index]);
				cacheLocker.Unlock();
			}
		}

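		// From here on the whole range [firstMissing, lastMissing] is handled
		// as one piece, including the pages that were just removed from the
		// cache.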
		missingPages = lastMissing - firstMissing + 1;

		// add the pages to the cache
		cacheLocker.Lock();

		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			fCache->InsertPage(pages[index], (off_t)pageOffset * B_PAGE_SIZE);
		}

		cacheLocker.Unlock();

		// read in the missing pages
		status_t error = _ReadIntoPages(pages, firstMissing - firstPageOffset,
			missingPages);
		if (error != B_OK) {
			ERROR("CachedDataReader::_ReadCacheLine(): Failed to read into "
				"cache (offset: %" B_PRIdOFF ", length: %" B_PRIuSIZE "), "
				"trying uncached read (offset: %" B_PRIdOFF ", length: %"
				B_PRIuSIZE ")\n", (off_t)firstMissing * B_PAGE_SIZE,
				(size_t)missingPages * B_PAGE_SIZE, requestOffset,
				requestLength);

			_DiscardPages(pages, firstMissing - firstPageOffset, missingPages);

			// Try again using an uncached transfer
			return fReader->ReadDataToOutput(requestOffset, requestLength,
				output);
		}
	}

	// write data to output
	status_t error = _WritePages(pages, requestOffset - lineOffset,
		requestLength, output);
	_CachePages(pages, 0, linePageCount);
	return error;
}


/*!	Frees all pages in the given range of the \a pages array.
	\c NULL entries in the range are OK. All non \c NULL entries must refer
	to pages with \c PAGE_STATE_UNUSED. The pages may belong to \c fCache or
	may not have a cache.
	\c fCache must not be locked.
*/
void
CachedDataReader::_DiscardPages(vm_page** pages, size_t firstPage,
	size_t pageCount)
{
	PRINT("%p->CachedDataReader::_DiscardPages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		")\n", this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = pages[i];
		if (page == NULL)
			continue;

		DEBUG_PAGE_ACCESS_START(page);

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		if (page->Cache() != NULL)
			fCache->RemovePage(page);

		vm_page_free(NULL, page);
	}
}


/*!	Marks all pages in the given range of the \a pages array cached.
	There must not be any \c NULL entries in the given array range. All pages
	must belong to \c fCache and have state \c PAGE_STATE_UNUSED.
	\c fCache must not be locked.
*/
void
CachedDataReader::_CachePages(vm_page** pages, size_t firstPage,
	size_t pageCount)
{
	PRINT("%p->CachedDataReader::_CachePages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		")\n", this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = pages[i];
		ASSERT(page != NULL);
		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED
				&& page->Cache() == fCache,
			"page: %p @! page -m %p", page, page);

		DEBUG_PAGE_ACCESS_START(page);
		vm_page_set_state(page, PAGE_STATE_CACHED);
		DEBUG_PAGE_ACCESS_END(page);
	}
}


/*!	Writes the contents of the pages in \a pages to \a output.
	\param pages The pages array.
	\param pagesRelativeOffset The offset relative to \a pages[0] at which to
		start writing.
	\param requestLength The number of bytes to write.
	\param output The output to which the data shall be written.
	\return \c B_OK if writing succeeded, another error code otherwise.
*/
status_t
CachedDataReader::_WritePages(vm_page** pages, size_t pagesRelativeOffset,
	size_t requestLength, BDataIO* output)
{
	PRINT("%p->CachedDataReader::_WritePages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		", %p)\n", this, pagesRelativeOffset, requestLength, output);

	size_t firstPage = pagesRelativeOffset / B_PAGE_SIZE;
	size_t endPage = (pagesRelativeOffset + requestLength + B_PAGE_SIZE - 1)
		/ B_PAGE_SIZE;

	// copy the data page by page, temporarily mapping each page
	size_t inPageOffset = pagesRelativeOffset % B_PAGE_SIZE;
	for (size_t i = firstPage; i < endPage; i++) {
		// map the page
		void* handle;
		addr_t address;
		status_t error = vm_get_physical_page(
			pages[i]->physical_page_number * B_PAGE_SIZE, &address,
			&handle);
		if (error != B_OK)
			return error;

		// write the page's data
		size_t toCopy = std::min(B_PAGE_SIZE - inPageOffset, requestLength);
		error = output->WriteExactly((uint8*)(address + inPageOffset), toCopy);

		// unmap the page
		vm_put_physical_page(address, handle);

		if (error != B_OK)
			return error;

		inPageOffset = 0;
		requestLength -= toCopy;
	}

	return B_OK;
}


status_t
CachedDataReader::_ReadIntoPages(vm_page** pages, size_t firstPage,
	size_t pageCount)
{
	PagesDataOutput output(pages + firstPage, pageCount);

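	// Clamp the request to the cache's virtual end, so that a partial last
	// page doesn't cause reading beyond the end of the data.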
	off_t firstPageOffset = (off_t)pages[firstPage]->cache_offset
		* B_PAGE_SIZE;
	generic_size_t requestLength = std::min(
			firstPageOffset + (off_t)pageCount * B_PAGE_SIZE,
			fCache->virtual_end)
		- firstPageOffset;

	return fReader->ReadDataToOutput(firstPageOffset, requestLength, &output);
}


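/*!	Locks the cache line at \a lineLocker's offset.
	If no other locker currently holds that line, \a lineLocker is registered
	in \c fCacheLineLockers and the caller may proceed. Otherwise it is
	appended to the holder's queue and waits until _UnlockCacheLine() wakes it
	up.
*/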
void
CachedDataReader::_LockCacheLine(CacheLineLocker* lineLocker)
{
	MutexLocker locker(fLock);

	CacheLineLocker* otherLineLocker
		= fCacheLineLockers.Lookup(lineLocker->Offset());
	if (otherLineLocker == NULL) {
		fCacheLineLockers.Insert(lineLocker);
		return;
	}

	// queue and wait
	otherLineLocker->Queue().Add(lineLocker);
	lineLocker->Wait(fLock);
}


void
CachedDataReader::_UnlockCacheLine(CacheLineLocker* lineLocker)
{
	MutexLocker locker(fLock);

	fCacheLineLockers.Remove(lineLocker);

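	// Hand the cache line over to the next waiter, if any: it inherits the
	// remaining queue, becomes the registered locker, and is woken up.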
	if (CacheLineLocker* nextLineLocker = lineLocker->Queue().RemoveHead()) {
		nextLineLocker->Queue().MoveFrom(&lineLocker->Queue());
		fCacheLineLockers.Insert(nextLineLocker);
		nextLineLocker->WakeUp();
	}
}
449