xref: /haiku/src/system/kernel/device_manager/IOCache.cpp (revision 830f67ef991407f287dbc1238aa5f5906d90c991)
/*
 * Copyright 2010-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "IOCache.h"

#include <algorithm>

#include <condition_variable.h>
#include <heap.h>
#include <low_resource_manager.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>


//#define TRACE_IO_CACHE 1
#ifdef TRACE_IO_CACHE
#	define TRACE(format...)	dprintf(format)
#else
#	define TRACE(format...)	do {} while (false)
#endif


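// Compares two pages by physical page number. Used with std::sort() so that
// a cache line's pages are ordered by physical address, which allows merging
// physically contiguous pages into fewer I/O vecs.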
static inline bool
page_physical_number_less(const vm_page* a, const vm_page* b)
{
	return a->physical_page_number < b->physical_page_number;
}


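// An IOOperation with a condition variable attached, so that the thread
// issuing the operation can wait for OperationCompleted() to be called.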
struct IOCache::Operation : IOOperation {
	ConditionVariable	finishedCondition;
};


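/*!	Creates an I/O cache for the device represented by the given DMA resource.
	\a cacheLineSize must be a power-of-two multiple of \c B_PAGE_SIZE;
	otherwise the constructor panics.
*/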
IOCache::IOCache(DMAResource* resource, size_t cacheLineSize)
	:
	IOScheduler(resource),
	fDeviceCapacity(0),
	fLineSize(cacheLineSize),
	fPagesPerLine(cacheLineSize / B_PAGE_SIZE),
	fArea(-1),
	fCache(NULL),
	fPages(NULL),
	fVecs(NULL)
{
	ASSERT(resource != NULL);
	TRACE("%p->IOCache::IOCache(%p, %" B_PRIuSIZE ")\n", this, resource,
		cacheLineSize);

	if (cacheLineSize < B_PAGE_SIZE
		|| (cacheLineSize & (cacheLineSize - 1)) != 0) {
		panic("Invalid cache line size (%" B_PRIuSIZE "). Must be a power of 2 "
			"multiple of the page size.", cacheLineSize);
	}

	mutex_init(&fSerializationLock, "I/O cache request serialization");

	fLineSizeShift = 0;
	while (cacheLineSize != 1) {
		fLineSizeShift++;
		cacheLineSize >>= 1;
	}
}


IOCache::~IOCache()
{
	if (fArea >= 0) {
		vm_page_unreserve_pages(&fMappingReservation);
		delete_area(fArea);
	}

	delete[] fPages;
	delete[] fVecs;

	mutex_destroy(&fSerializationLock);
}


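/*!	Performs the second-stage initialization: creates the null area used for
	temporarily mapping cache lines, reserves the pages needed for mapping a
	complete line, and allocates the page and I/O vec arrays.
	\return \c B_OK on success, another error code otherwise.
*/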
status_t
IOCache::Init(const char* name)
{
	TRACE("%p->IOCache::Init(\"%s\")\n", this, name);

	status_t error = IOScheduler::Init(name);
	if (error != B_OK)
		return error;

	// create the area for mapping cache lines
	fArea = vm_create_null_area(B_SYSTEM_TEAM, "I/O cache line", &fAreaBase,
		B_ANY_KERNEL_ADDRESS, fLineSize, 0);
	if (fArea < 0)
		return fArea;

	// reserve pages for mapping a complete cache line
	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	size_t pagesNeeded = translationMap->MaxPagesNeededToMap((addr_t)fAreaBase,
		(addr_t)fAreaBase + fLineSize - 1);
	vm_page_reserve_pages(&fMappingReservation, pagesNeeded,
		VM_PRIORITY_SYSTEM);

	// get the area's cache
	VMArea* area = VMAreaHash::Lookup(fArea);
	if (area == NULL) {
		panic("IOCache::Init(): Where's our area (id: %" B_PRId32 ")?!", fArea);
		return B_ERROR;
	}
	fCache = area->cache;

	// allocate arrays for pages and io vecs
	fPages = new(std::nothrow) vm_page*[fPagesPerLine];
	fVecs = new(std::nothrow) generic_io_vec[fPagesPerLine];
	if (fPages == NULL || fVecs == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


void
IOCache::SetDeviceCapacity(off_t deviceCapacity)
{
	TRACE("%p->IOCache::SetDeviceCapacity(%" B_PRIdOFF ")\n", this,
		deviceCapacity);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	fDeviceCapacity = deviceCapacity;
}


void
IOCache::MediaChanged()
{
	TRACE("%p->IOCache::MediaChanged()\n", this);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	// new media -- burn all cached data
	while (vm_page* page = fCache->pages.Root()) {
		DEBUG_PAGE_ACCESS_START(page);
		fCache->RemovePage(page);
		vm_page_free(NULL, page);
	}
}


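/*!	Executes the given request synchronously.
	All requests are serialized via \c fSerializationLock and processed one at
	a time in FIFO order. For virtual buffers the request's memory is locked
	for the duration of the transfer. The request's status is set and it is
	notified before this method returns.
*/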
status_t
IOCache::ScheduleRequest(IORequest* request)
{
	TRACE("%p->IOCache::ScheduleRequest(%p)\n", this, request);

	// lock the request's memory
	status_t error;
	IOBuffer* buffer = request->Buffer();
	if (buffer->IsVirtual()) {
		error = buffer->LockMemory(request->TeamID(), request->IsWrite());
		if (error != B_OK) {
			request->SetStatusAndNotify(error);
			return error;
		}
	}

	// we completely serialize all I/O in FIFO order
	MutexLocker serializationLocker(fSerializationLock);
	generic_size_t bytesTransferred = 0;
	error = _DoRequest(request, bytesTransferred);
	serializationLocker.Unlock();

	// unlock memory
	if (buffer->IsVirtual())
		buffer->UnlockMemory(request->TeamID(), request->IsWrite());

	// set status and notify
	if (error == B_OK) {
		request->SetTransferredBytes(bytesTransferred < request->Length(),
			bytesTransferred);
		request->SetStatusAndNotify(B_OK);
	} else
		request->SetStatusAndNotify(error);

	return error;
}


void
IOCache::AbortRequest(IORequest* request, status_t status)
{
	// TODO:...
}


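/*!	Called when an I/O operation issued by this cache has been completed.
	Wakes up the thread waiting in _DoOperation(). A partial transfer is
	treated as an error.
*/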
void
IOCache::OperationCompleted(IOOperation* operation, status_t status,
	generic_size_t transferredBytes)
{
	if (status == B_OK) {
		// always fail in case of partial transfers
		((Operation*)operation)->finishedCondition.NotifyAll(
			transferredBytes == operation->Length() ? B_OK : B_ERROR);
	} else
		((Operation*)operation)->finishedCondition.NotifyAll(status);
}


void
IOCache::Dump() const
{
	kprintf("IOCache at %p\n", this);
	kprintf("  DMA resource:   %p\n", fDMAResource);
}


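/*!	Processes the given request cache line by cache line.
	The request is clamped to the device capacity. \a _bytesTransferred is set
	to the number of bytes actually transferred.
*/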
status_t
IOCache::_DoRequest(IORequest* request, generic_size_t& _bytesTransferred)
{
	off_t offset = request->Offset();
	generic_size_t length = request->Length();

	TRACE("%p->IOCache::_DoRequest(%p): offset: %" B_PRIdOFF
		", length: %" B_PRIuSIZE "\n", this, request, offset, length);

	if (offset < 0 || offset > fDeviceCapacity)
		return B_BAD_VALUE;

	// truncate the request to the device capacity
	if (fDeviceCapacity - offset < (off_t)length)
		length = fDeviceCapacity - offset;

	_bytesTransferred = 0;

	while (length > 0) {
		// the start of the current cache line
		off_t lineOffset = (offset >> fLineSizeShift) << fLineSizeShift;

		// intersection of request and cache line
		off_t cacheLineEnd = std::min(lineOffset + (off_t)fLineSize,
			fDeviceCapacity);
		size_t requestLineLength
			= std::min(cacheLineEnd - offset, (off_t)length);

		// transfer the data of the cache line
		status_t error = _TransferRequestLine(request, lineOffset,
			cacheLineEnd - lineOffset, offset, requestLineLength);
		if (error != B_OK)
			return error;

		offset = cacheLineEnd;
		length -= requestLineLength;
		_bytesTransferred += requestLineLength;
	}

	return B_OK;
}


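/*!	Transfers the part of the request that intersects with a single cache
	line. Pages of the line that are already in \c fCache are reused; missing
	pages are allocated and, if necessary, read in from the device before the
	request data is copied. If reserving pages or reading into the cache
	fails, the transfer falls back to _TransferRequestLineUncached().
*/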
status_t
IOCache::_TransferRequestLine(IORequest* request, off_t lineOffset,
	size_t lineSize, off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLine(%p, %" B_PRIdOFF
		", %" B_PRIdOFF  ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// Check which pages of the cache line are already present and mark them
	// unused, so they won't be reclaimed while we are working with them.
	page_num_t firstPageOffset = lineOffset / B_PAGE_SIZE;
	page_num_t linePageCount = (lineSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	AutoLocker<VMCache> cacheLocker(fCache);

	page_num_t firstMissing = 0;
	page_num_t lastMissing = 0;
	page_num_t missingPages = 0;
	page_num_t pageOffset = firstPageOffset;

	VMCachePagesTree::Iterator it = fCache->pages.GetIterator(pageOffset, true,
		true);
	while (pageOffset < firstPageOffset + linePageCount) {
		vm_page* page = it.Next();
		page_num_t currentPageOffset;
		if (page == NULL
			|| page->cache_offset >= firstPageOffset + linePageCount) {
			page = NULL;
			currentPageOffset = firstPageOffset + linePageCount;
		} else
			currentPageOffset = page->cache_offset;

		if (pageOffset < currentPageOffset) {
			// pages are missing
			if (missingPages == 0)
				firstMissing = pageOffset;
			lastMissing = currentPageOffset - 1;
			missingPages += currentPageOffset - pageOffset;

			for (; pageOffset < currentPageOffset; pageOffset++)
				fPages[pageOffset - firstPageOffset] = NULL;
		}

		if (page != NULL) {
			fPages[pageOffset++ - firstPageOffset] = page;
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_UNUSED);
			DEBUG_PAGE_ACCESS_END(page);
		}
	}

	cacheLocker.Unlock();

	bool isVIP = (request->Flags() & B_VIP_IO_REQUEST) != 0;

	if (missingPages > 0) {
// TODO: If this is a read request and the missing pages range doesn't intersect
// with the request, just satisfy the request and don't read anything at all.
		// There are pages of the cache line missing. We have to allocate fresh
		// ones.

		// reserve
		vm_page_reservation reservation;
		if (!vm_page_try_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM)) {
			_DiscardPages(firstMissing - firstPageOffset, missingPages);

			// fall back to uncached transfer
			return _TransferRequestLineUncached(request, lineOffset,
				requestOffset, requestLength);
		}

		// Allocate the missing pages and remove the already existing pages in
		// the range from the cache. We're going to read/write the whole range
		// anyway and this way we can sort it, possibly improving the physical
		// vecs.
// TODO: When memory is low, we should consider cannibalizing ourselves or
// simply transferring past the cache!
		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			if (fPages[index] == NULL) {
				fPages[index] = vm_page_allocate_page(&reservation,
					PAGE_STATE_UNUSED);
				DEBUG_PAGE_ACCESS_END(fPages[index]);
			} else {
				cacheLocker.Lock();
				fCache->RemovePage(fPages[index]);
				cacheLocker.Unlock();
			}
		}

		missingPages = lastMissing - firstMissing + 1;

		// sort the page array by physical page number
		std::sort(fPages + firstMissing - firstPageOffset,
			fPages + lastMissing - firstPageOffset + 1,
			page_physical_number_less);

		// add the pages to the cache
		cacheLocker.Lock();

		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			fCache->InsertPage(fPages[index], (off_t)pageOffset * B_PAGE_SIZE);
		}

		cacheLocker.Unlock();

		// Read in the missing pages, if this is a read request or a write
		// request that doesn't cover the complete missing range.
		if (request->IsRead()
			|| requestOffset < (off_t)firstMissing * B_PAGE_SIZE
			|| requestOffset + (off_t)requestLength
				> (off_t)(lastMissing + 1) * B_PAGE_SIZE) {
			status_t error = _TransferPages(firstMissing - firstPageOffset,
				missingPages, false, isVIP);
			if (error != B_OK) {
				dprintf("IOCache::_TransferRequestLine(): Failed to read into "
					"cache (offset: %" B_PRIdOFF ", length: %" B_PRIuSIZE "), "
					"trying uncached read (offset: %" B_PRIdOFF ", length: %"
					B_PRIuSIZE ")\n", (off_t)firstMissing * B_PAGE_SIZE,
					(size_t)missingPages * B_PAGE_SIZE, requestOffset,
					requestLength);

				_DiscardPages(firstMissing - firstPageOffset, missingPages);

				// Try again using an uncached transfer
				return _TransferRequestLineUncached(request, lineOffset,
					requestOffset, requestLength);
			}
		}
	}

	if (request->IsRead()) {
		// copy data to request
		status_t error = _CopyPages(request, requestOffset - lineOffset,
			requestOffset, requestLength, true);
		_CachePages(0, linePageCount);
		return error;
	}

	// copy data from request
	status_t error = _CopyPages(request, requestOffset - lineOffset,
		requestOffset, requestLength, false);
	if (error != B_OK) {
		_DiscardPages(0, linePageCount);
		return error;
	}

	// write the pages to disk
	page_num_t firstPage = (requestOffset - lineOffset) / B_PAGE_SIZE;
	page_num_t endPage = (requestOffset + requestLength - lineOffset
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;
	error = _TransferPages(firstPage, endPage - firstPage, true, isVIP);

	if (error != B_OK) {
		_DiscardPages(firstPage, endPage - firstPage);
		return error;
	}

	_CachePages(0, linePageCount);
	return error;
}


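/*!	Transfers the given part of the request directly, bypassing the page
	cache. Used as a fallback when pages for the cache line cannot be
	reserved or read in.
*/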
status_t
IOCache::_TransferRequestLineUncached(IORequest* request, off_t lineOffset,
	off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLineUncached(%p, %" B_PRIdOFF
		", %" B_PRIdOFF  ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// Advance the request to the interesting offset, so the DMAResource can
	// provide us with fitting operations.
	off_t actualRequestOffset
		= request->Offset() + request->Length() - request->RemainingBytes();
	if (actualRequestOffset > requestOffset) {
		dprintf("IOCache::_TransferRequestLineUncached(): Request %p advanced "
			"beyond current cache line (%" B_PRIdOFF " vs. %" B_PRIdOFF ")\n",
			request, actualRequestOffset, requestOffset);
		return B_BAD_VALUE;
	}

	if (actualRequestOffset < requestOffset)
		request->Advance(requestOffset - actualRequestOffset);

	generic_size_t requestRemaining = request->RemainingBytes() - requestLength;

	// Process single operations until the specified part of the request is
	// finished or until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request->RemainingBytes() > requestRemaining
		&& request->Status() > 0) {
		status_t error = fDMAResource->TranslateNext(request, &operation,
			request->RemainingBytes() - requestRemaining);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request->OperationFinished(&operation, error, false,
			error == B_OK ? operation.OriginalLength() : 0);
		request->SetUnfinished();
			// Keep the request in unfinished state. ScheduleRequest() will set
			// the final status and notify.

		fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferRequestLineUncached(): operation at "
				"%" B_PRIdOFF " failed: %s\n", this, operation.Offset(),
				strerror(error));
			return error;
		}
	}

	return B_OK;
}


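/*!	Starts a single I/O operation and waits for it to finish, restarting the
	transfer as long as IOOperation::Finish() indicates that more work is
	required.
*/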
status_t
IOCache::_DoOperation(Operation& operation)
{
	TRACE("%p->IOCache::_DoOperation(%" B_PRIdOFF ", %" B_PRIuSIZE ")\n", this,
		operation.Offset(), operation.Length());

	while (true) {
		ConditionVariableEntry waitEntry;
		operation.finishedCondition.Add(&waitEntry);

		status_t error = fIOCallback(fIOCallbackData, &operation);
		if (error != B_OK) {
			operation.finishedCondition.NotifyAll(error);
				// removes the entry from the variable
			return error;
		}

		// wait for the operation to finish
		error = waitEntry.Wait();
		if (error != B_OK)
			return error;

		if (operation.Finish())
			return B_OK;
	}
}


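/*!	Reads/writes a range of the current cache line's pages directly from/to
	the device using a physical I/O request.
	\a firstPage and \a pageCount refer to indices into the \c fPages array.
	The transfer length is clipped to the device capacity.
*/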
status_t
IOCache::_TransferPages(size_t firstPage, size_t pageCount, bool isWrite,
	bool isVIP)
{
	TRACE("%p->IOCache::_TransferPages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		", write: %d, vip: %d)\n", this, firstPage, pageCount, isWrite, isVIP);

	off_t firstPageOffset = (off_t)fPages[firstPage]->cache_offset
		* B_PAGE_SIZE;
	generic_size_t requestLength = std::min(
			firstPageOffset + (off_t)pageCount * B_PAGE_SIZE, fDeviceCapacity)
		- firstPageOffset;

	// prepare the I/O vecs
	size_t vecCount = 0;
	size_t endPage = firstPage + pageCount;
	phys_addr_t vecsEndAddress = 0;
	for (size_t i = firstPage; i < endPage; i++) {
		phys_addr_t pageAddress
			= (phys_addr_t)fPages[i]->physical_page_number * B_PAGE_SIZE;
		if (vecCount == 0 || pageAddress != vecsEndAddress) {
			fVecs[vecCount].base = pageAddress;
			fVecs[vecCount++].length = B_PAGE_SIZE;
			vecsEndAddress = pageAddress + B_PAGE_SIZE;
		} else {
			// extend the previous vec
			fVecs[vecCount - 1].length += B_PAGE_SIZE;
			vecsEndAddress += B_PAGE_SIZE;
		}
	}

	// Don't try to read past the end of the device just to fill a page;
	// this makes sure that sum(fVecs[].length) == requestLength
	generic_size_t padLength = B_PAGE_SIZE - requestLength % B_PAGE_SIZE;
	if (vecCount > 0 && padLength != B_PAGE_SIZE)
		fVecs[vecCount - 1].length -= padLength;

	// create a request for the transfer
	IORequest request;
	status_t error = request.Init(firstPageOffset, fVecs, vecCount,
		requestLength, isWrite,
		B_PHYSICAL_IO_REQUEST | (isVIP ? B_VIP_IO_REQUEST : 0));
	if (error != B_OK)
		return error;

	// Process single operations until the complete request is finished or
	// until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request.RemainingBytes() > 0 && request.Status() > 0) {
		error = fDMAResource->TranslateNext(&request, &operation,
			requestLength);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request.RemoveOperation(&operation);

		fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferPages(): operation at %" B_PRIdOFF
				" failed: %s\n", this, operation.Offset(), strerror(error));
			return error;
		}
	}

	return B_OK;
}


/*!	Frees all pages in the given range of the \c fPages array.
	\c NULL entries in the range are OK. All non-\c NULL entries must refer
	to pages with \c PAGE_STATE_UNUSED. The pages may belong to \c fCache or
	may not have a cache.
	\c fCache must not be locked.
*/
void
IOCache::_DiscardPages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_DiscardPages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		if (page == NULL)
			continue;

		DEBUG_PAGE_ACCESS_START(page);

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		if (page->Cache() != NULL)
			fCache->RemovePage(page);

		vm_page_free(NULL, page);
	}
}


/*!	Marks all pages in the given range of the \c fPages array cached.
	There must not be any \c NULL entries in the given array range. All pages
	must belong to \c fCache and have state \c PAGE_STATE_UNUSED.
	\c fCache must not be locked.
*/
void
IOCache::_CachePages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_CachePages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		ASSERT(page != NULL);
		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED
				&& page->Cache() == fCache,
			"page: %p @! page -m %p", page, page);

		DEBUG_PAGE_ACCESS_START(page);
		vm_page_set_state(page, PAGE_STATE_CACHED);
		DEBUG_PAGE_ACCESS_END(page);
	}
}


/*!	Copies the contents of pages in \c fPages to \a request, or vice versa.
	\param request The request.
	\param pagesRelativeOffset The offset relative to \c fPages[0] where to
		start copying.
	\param requestOffset The request offset where to start copying.
	\param requestLength The number of bytes to copy.
	\param toRequest If \c true the copy direction is from \c fPages to
		\a request, otherwise the other way around.
	\return \c B_OK, if copying went fine, another error code otherwise.
*/
status_t
IOCache::_CopyPages(IORequest* request, size_t pagesRelativeOffset,
	off_t requestOffset, size_t requestLength, bool toRequest)
{
	TRACE("%p->IOCache::_CopyPages(%p, %" B_PRIuSIZE ", %" B_PRIdOFF
		", %" B_PRIuSIZE ", %d)\n", this, request, pagesRelativeOffset,
		requestOffset, requestLength, toRequest);

	size_t firstPage = pagesRelativeOffset / B_PAGE_SIZE;
	size_t endPage = (pagesRelativeOffset + requestLength + B_PAGE_SIZE - 1)
		/ B_PAGE_SIZE;

	// map the pages
	status_t error = _MapPages(firstPage, endPage);
// TODO: _MapPages() cannot fail, so the fallback is never needed. Test which
// method is faster (probably the active one)!
#if 0
	if (error != B_OK) {
		// fallback to copying individual pages
		size_t inPageOffset = pagesRelativeOffset % B_PAGE_SIZE;
		for (size_t i = firstPage; i < endPage; i++) {
			// map the page
			void* handle;
			addr_t address;
			error = vm_get_physical_page(
				fPages[i]->physical_page_number * B_PAGE_SIZE, &address,
				&handle);
			if (error != B_OK)
				return error;

			// copy the page's data
			size_t toCopy = std::min(B_PAGE_SIZE - inPageOffset, requestLength);

			if (toRequest) {
				error = request->CopyData((uint8*)(address + inPageOffset),
					requestOffset, toCopy);
			} else {
				error = request->CopyData(requestOffset,
					(uint8*)(address + inPageOffset), toCopy);
			}

			// unmap the page
			vm_put_physical_page(address, handle);

			if (error != B_OK)
				return error;

			inPageOffset = 0;
			requestOffset += toCopy;
			requestLength -= toCopy;
		}

		return B_OK;
	}
#endif	// 0

	// copy
	if (toRequest) {
		error = request->CopyData((uint8*)fAreaBase + pagesRelativeOffset,
			requestOffset, requestLength);
	} else {
		error = request->CopyData(requestOffset,
			(uint8*)fAreaBase + pagesRelativeOffset, requestLength);
	}

	// unmap the pages
	_UnmapPages(firstPage, endPage);

	return error;
}


/*!	Maps a range of pages in \c fPages into \c fArea.

	If successful, it must be balanced by a call to _UnmapPages().

	\param firstPage The \c fPages relative index of the first page to map.
	\param endPage The \c fPages relative index of the page after the last page
		to map.
	\return \c B_OK, if mapping went fine, another error code otherwise.
*/
status_t
IOCache::_MapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	for (size_t i = firstPage; i < endPage; i++) {
		vm_page* page = fPages[i];

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		translationMap->Map((addr_t)fAreaBase + i * B_PAGE_SIZE,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &fMappingReservation);
		// NOTE: We don't increment gMappedPagesCount. Our pages have state
		// PAGE_STATE_UNUSED anyway and we map them only for a short time.
	}

	translationMap->Unlock();

	return B_OK;
}


/*!	Unmaps a range of pages in \c fPages from \c fArea.

	Must balance a call to _MapPages().

	\param firstPage The \c fPages relative index of the first page to unmap.
	\param endPage The \c fPages relative index of the page after the last page
		to unmap.
*/
void
IOCache::_UnmapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	translationMap->Unmap((addr_t)fAreaBase + firstPage * B_PAGE_SIZE,
		(addr_t)fAreaBase + endPage * B_PAGE_SIZE - 1);

	translationMap->Unlock();
}