xref: /haiku/src/add-ons/kernel/file_systems/packagefs/package/CachedDataReader.cpp (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
/*
 * Copyright 2010-2014, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "CachedDataReader.h"

#include <algorithm>

#include <DataIO.h>

#include <util/AutoLock.h>
#include <vm/VMCache.h>
#include <vm/vm_page.h>

#include "DebugSupport.h"


using BPackageKit::BHPKG::BBufferDataReader;


static inline bool
page_physical_number_less(const vm_page* a, const vm_page* b)
{
	return a->physical_page_number < b->physical_page_number;
}


// #pragma mark - PagesDataOutput


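// Adapter that presents a run of physical pages as a BDataIO target, so that
// data can be written (via vm_memcpy_to_physical()) directly into those pages.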
struct CachedDataReader::PagesDataOutput : public BDataIO {
	PagesDataOutput(vm_page** pages, size_t pageCount)
		:
		fPages(pages),
		fPageCount(pageCount),
		fInPageOffset(0)
	{
	}

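	// Copies the buffer into the pages sequentially, crossing page boundaries
	// as needed; returns B_BAD_VALUE when running out of pages.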
	virtual ssize_t Write(const void* buffer, size_t size)
	{
		size_t bytesRemaining = size;
		while (bytesRemaining > 0) {
			if (fPageCount == 0)
				return B_BAD_VALUE;

			size_t toCopy = std::min(bytesRemaining,
				B_PAGE_SIZE - fInPageOffset);
			status_t error = vm_memcpy_to_physical(
				fPages[0]->physical_page_number * B_PAGE_SIZE + fInPageOffset,
				buffer, toCopy, false);
			if (error != B_OK)
				return error;

			fInPageOffset += toCopy;
			if (fInPageOffset == B_PAGE_SIZE) {
				fInPageOffset = 0;
				fPages++;
				fPageCount--;
			}

			buffer = (const char*)buffer + toCopy;
			bytesRemaining -= toCopy;
		}

		return size;
	}

private:
	vm_page**	fPages;
	size_t		fPageCount;
	size_t		fInPageOffset;
};


// #pragma mark - CachedDataReader


CachedDataReader::CachedDataReader()
	:
	fReader(NULL),
	fCache(NULL),
	fCacheLineLockers()
{
	mutex_init(&fLock, "packagefs cached reader");
}


CachedDataReader::~CachedDataReader()
{
	if (fCache != NULL) {
		fCache->Lock();
		fCache->ReleaseRefAndUnlock();
	}

	mutex_destroy(&fLock);
}


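/*!	Initializes the reader: \a reader is the underlying data reader and \a size
	the total size of the data it provides. Sets up the hash table of cache
	line lockers and creates the VMCache (a null cache) that is used to hold
	the cached pages.
*/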
status_t
CachedDataReader::Init(BAbstractBufferedDataReader* reader, off_t size)
{
	fReader = reader;

	status_t error = fCacheLineLockers.Init();
	if (error != B_OK)
		RETURN_ERROR(error);

	error = VMCacheFactory::CreateNullCache(VM_PRIORITY_SYSTEM,
		fCache);
	if (error != B_OK)
		RETURN_ERROR(error);

	fCache->virtual_end = size;
	return B_OK;
}


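/*!	Reads \a size bytes at \a offset into \a output, splitting the request
	into cache line sized chunks and serving each chunk via _ReadCacheLine().
*/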
status_t
CachedDataReader::ReadDataToOutput(off_t offset, size_t size,
	BDataIO* output)
{
	if (offset > fCache->virtual_end
		|| (off_t)size > fCache->virtual_end - offset) {
		return B_BAD_VALUE;
	}

	if (size == 0)
		return B_OK;

	while (size > 0) {
		// the start of the current cache line
		off_t lineOffset = (offset / kCacheLineSize) * kCacheLineSize;

		// intersection of request and cache line
		off_t cacheLineEnd = std::min(lineOffset + (off_t)kCacheLineSize,
			fCache->virtual_end);
		size_t requestLineLength
			= std::min(cacheLineEnd - offset, (off_t)size);

		// transfer the data of the cache line
		status_t error = _ReadCacheLine(lineOffset, cacheLineEnd - lineOffset,
			offset, requestLineLength, output);
		if (error != B_OK)
			return error;

		offset = cacheLineEnd;
		size -= requestLineLength;
	}

	return B_OK;
}


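/*!	Transfers the intersection of the request (\a requestOffset,
	\a requestLength) with the cache line starting at \a lineOffset (spanning
	\a lineSize bytes) to \a output.
	The cache line is locked while being worked on. Pages already in \c fCache
	are reused; missing ones are allocated, inserted, and read in. If pages
	cannot be reserved or read, the request is served by an uncached read from
	the underlying reader instead.
*/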
status_t
CachedDataReader::_ReadCacheLine(off_t lineOffset, size_t lineSize,
	off_t requestOffset, size_t requestLength, BDataIO* output)
{
	PRINT("CachedDataReader::_ReadCacheLine(%" B_PRIdOFF ", %zu, %" B_PRIdOFF
		", %zu, %p)\n", lineOffset, lineSize, requestOffset, requestLength,
		output);

	CacheLineLocker cacheLineLocker(this, lineOffset);

	// Check which pages of the cache line are already in the cache and mark
	// them unused for the duration of the operation (_CachePages() marks them
	// cached again when done).
	page_num_t firstPageOffset = lineOffset / B_PAGE_SIZE;
	page_num_t linePageCount = (lineSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
	vm_page* pages[kPagesPerCacheLine] = {};

	AutoLocker<VMCache> cacheLocker(fCache);

	page_num_t firstMissing = 0;
	page_num_t lastMissing = 0;
	page_num_t missingPages = 0;
	page_num_t pageOffset = firstPageOffset;

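	// Scan the cache line's page range: note which pages are already present
	// and track the first/last missing page and the number of missing pages.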
	VMCachePagesTree::Iterator it = fCache->pages.GetIterator(pageOffset, true,
		true);
	while (pageOffset < firstPageOffset + linePageCount) {
		vm_page* page = it.Next();
		page_num_t currentPageOffset;
		if (page == NULL
			|| page->cache_offset >= firstPageOffset + linePageCount) {
			page = NULL;
			currentPageOffset = firstPageOffset + linePageCount;
		} else
			currentPageOffset = page->cache_offset;

		if (pageOffset < currentPageOffset) {
			// pages are missing
			if (missingPages == 0)
				firstMissing = pageOffset;
			lastMissing = currentPageOffset - 1;
			missingPages += currentPageOffset - pageOffset;

			for (; pageOffset < currentPageOffset; pageOffset++)
				pages[pageOffset - firstPageOffset] = NULL;
		}

		if (page != NULL) {
			pages[pageOffset++ - firstPageOffset] = page;
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_UNUSED);
			DEBUG_PAGE_ACCESS_END(page);
		}
	}

	cacheLocker.Unlock();

	if (missingPages > 0) {
// TODO: If the missing pages range doesn't intersect with the request, just
// satisfy the request and don't read anything at all.
		// There are pages of the cache line missing. We have to allocate fresh
		// ones.

		// reserve
		vm_page_reservation reservation;
		if (!vm_page_try_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM)) {
			_DiscardPages(pages, firstMissing - firstPageOffset, missingPages);

			// fall back to uncached transfer
			return fReader->ReadDataToOutput(requestOffset, requestLength,
				output);
		}

		// Allocate the missing pages and remove the already existing pages in
		// the range from the cache. We're going to read/write the whole range
		// anyway.
		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			if (pages[index] == NULL) {
				pages[index] = vm_page_allocate_page(&reservation,
					PAGE_STATE_UNUSED);
				DEBUG_PAGE_ACCESS_END(pages[index]);
			} else {
				cacheLocker.Lock();
				fCache->RemovePage(pages[index]);
				cacheLocker.Unlock();
			}
		}

		missingPages = lastMissing - firstMissing + 1;

		// add the pages to the cache
		cacheLocker.Lock();

		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			fCache->InsertPage(pages[index], (off_t)pageOffset * B_PAGE_SIZE);
		}

		cacheLocker.Unlock();

		// read in the missing pages
		status_t error = _ReadIntoPages(pages, firstMissing - firstPageOffset,
			missingPages);
		if (error != B_OK) {
			ERROR("CachedDataReader::_ReadCacheLine(): Failed to read into "
				"cache (offset: %" B_PRIdOFF ", length: %" B_PRIuSIZE "), "
				"trying uncached read (offset: %" B_PRIdOFF ", length: %"
				B_PRIuSIZE ")\n", (off_t)firstMissing * B_PAGE_SIZE,
				(size_t)missingPages * B_PAGE_SIZE, requestOffset,
				requestLength);

			_DiscardPages(pages, firstMissing - firstPageOffset, missingPages);

			// Try again using an uncached transfer
			return fReader->ReadDataToOutput(requestOffset, requestLength,
				output);
		}
	}

	// write data to output
	status_t error = _WritePages(pages, requestOffset - lineOffset,
		requestLength, output);
	_CachePages(pages, 0, linePageCount);
	return error;
}



/*!	Frees all pages in the given range of the \a pages array.
	\c NULL entries in the range are OK. All non-\c NULL entries must refer
	to pages with \c PAGE_STATE_UNUSED. The pages may belong to \c fCache or
	may not have a cache at all.
	\c fCache must not be locked.
*/
void
CachedDataReader::_DiscardPages(vm_page** pages, size_t firstPage,
	size_t pageCount)
{
	PRINT("%p->CachedDataReader::_DiscardPages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		")\n", this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = pages[i];
		if (page == NULL)
			continue;

		DEBUG_PAGE_ACCESS_START(page);

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		if (page->Cache() != NULL)
			fCache->RemovePage(page);

		vm_page_free(NULL, page);
	}
}


/*!	Marks all pages in the given range of the \a pages array cached.
	There must not be any \c NULL entries in the given array range. All pages
	must belong to \c fCache and have state \c PAGE_STATE_UNUSED.
	\c fCache must not be locked.
*/
void
CachedDataReader::_CachePages(vm_page** pages, size_t firstPage,
	size_t pageCount)
{
	PRINT("%p->CachedDataReader::_CachePages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		")\n", this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = pages[i];
		ASSERT(page != NULL);
		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED
				&& page->Cache() == fCache,
			"page: %p @! page -m %p", page, page);

		DEBUG_PAGE_ACCESS_START(page);
		vm_page_set_state(page, PAGE_STATE_CACHED);
		DEBUG_PAGE_ACCESS_END(page);
	}
}


/*!	Writes the contents of the pages in \a pages to \a output.
	\param pages The pages array.
	\param pagesRelativeOffset The offset relative to \a pages[0] at which to
		start writing.
	\param requestLength The number of bytes to write.
	\param output The output to which the data shall be written.
	\return \c B_OK if writing went fine, another error code otherwise.
*/
status_t
CachedDataReader::_WritePages(vm_page** pages, size_t pagesRelativeOffset,
	size_t requestLength, BDataIO* output)
{
	PRINT("%p->CachedDataReader::_WritePages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		", %p)\n", this, pagesRelativeOffset, requestLength, output);

	size_t firstPage = pagesRelativeOffset / B_PAGE_SIZE;
	size_t endPage = (pagesRelativeOffset + requestLength + B_PAGE_SIZE - 1)
		/ B_PAGE_SIZE;

	// copy the data page by page via temporary physical page mappings
	size_t inPageOffset = pagesRelativeOffset % B_PAGE_SIZE;
	for (size_t i = firstPage; i < endPage; i++) {
		// map the page
		void* handle;
		addr_t address;
		status_t error = vm_get_physical_page(
			pages[i]->physical_page_number * B_PAGE_SIZE, &address,
			&handle);
		if (error != B_OK)
			return error;

		// write the page's data
		size_t toCopy = std::min(B_PAGE_SIZE - inPageOffset, requestLength);
		error = output->WriteExactly((uint8*)(address + inPageOffset), toCopy);

		// unmap the page
		vm_put_physical_page(address, handle);

		if (error != B_OK)
			return error;

		inPageOffset = 0;
		requestLength -= toCopy;
	}

	return B_OK;
}


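/*!	Reads the data for \a pageCount pages, starting at index \a firstPage in
	the \a pages array, from the underlying reader directly into those pages,
	clamping the read length to the end of the cached data.
*/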
status_t
CachedDataReader::_ReadIntoPages(vm_page** pages, size_t firstPage,
	size_t pageCount)
{
	PagesDataOutput output(pages + firstPage, pageCount);

	off_t firstPageOffset = (off_t)pages[firstPage]->cache_offset
		* B_PAGE_SIZE;
	generic_size_t requestLength = std::min(
			firstPageOffset + (off_t)pageCount * B_PAGE_SIZE,
			fCache->virtual_end)
		- firstPageOffset;

	return fReader->ReadDataToOutput(firstPageOffset, requestLength, &output);
}


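/*!	Locks the cache line the given \a lineLocker refers to. If no other locker
	holds that cache line yet, \a lineLocker is entered into the hash table and
	the method returns immediately; otherwise it is queued behind the current
	holder and the calling thread waits until woken by _UnlockCacheLine().
*/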
void
CachedDataReader::_LockCacheLine(CacheLineLocker* lineLocker)
{
	MutexLocker locker(fLock);

	CacheLineLocker* otherLineLocker
		= fCacheLineLockers.Lookup(lineLocker->Offset());
	if (otherLineLocker == NULL) {
		fCacheLineLockers.Insert(lineLocker);
		return;
	}

	// queue and wait
	otherLineLocker->Queue().Add(lineLocker);
	lineLocker->Wait(fLock);
}


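/*!	Unlocks the cache line held by \a lineLocker. If other lockers are queued
	behind it, the first one inherits the remainder of the queue, becomes the
	new holder of the cache line, and is woken up.
*/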
void
CachedDataReader::_UnlockCacheLine(CacheLineLocker* lineLocker)
{
	MutexLocker locker(fLock);

	fCacheLineLockers.Remove(lineLocker);

	if (CacheLineLocker* nextLineLocker = lineLocker->Queue().RemoveHead()) {
		nextLineLocker->Queue().MoveFrom(&lineLocker->Queue());
		fCacheLineLockers.Insert(nextLineLocker);
		nextLineLocker->WakeUp();
	}
}