xref: /haiku/src/system/kernel/device_manager/IOCache.cpp (revision 125183f9e5c136781f71c879faaeab43fdc3ea7b)
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "IOCache.h"

#include <algorithm>

#include <condition_variable.h>
#include <heap.h>
#include <low_resource_manager.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>


//#define TRACE_IO_CACHE 1
#ifdef TRACE_IO_CACHE
#	define TRACE(format...)	dprintf(format)
#else
#	define TRACE(format...)	do {} while (false)
#endif


static inline bool
page_physical_number_less(const vm_page* a, const vm_page* b)
{
	return a->physical_page_number < b->physical_page_number;
}


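// An Operation is an IOOperation extended with a condition variable, so the
// issuing thread can block until OperationCompleted() is called for it.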
struct IOCache::Operation : IOOperation {
	ConditionVariable	finishedCondition;
};


IOCache::IOCache(DMAResource* resource, size_t cacheLineSize)
	:
	IOScheduler(resource),
	fDeviceCapacity(0),
	fLineSize(cacheLineSize),
	fPagesPerLine(cacheLineSize / B_PAGE_SIZE),
	fArea(-1),
	fCache(NULL),
	fPages(NULL),
	fVecs(NULL)
{
	TRACE("%p->IOCache::IOCache(%p, %" B_PRIuSIZE ")\n", this, resource,
		cacheLineSize);

	if (cacheLineSize < B_PAGE_SIZE
		|| (cacheLineSize & (cacheLineSize - 1)) != 0) {
		panic("Invalid cache line size (%" B_PRIuSIZE "). Must be a power of 2 "
			"multiple of the page size.", cacheLineSize);
	}

	mutex_init(&fSerializationLock, "I/O cache request serialization");

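	// compute the line size shift, i.e. log2 of the (power of two) line size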
	fLineSizeShift = 0;
	while (cacheLineSize != 1) {
		fLineSizeShift++;
		cacheLineSize >>= 1;
	}
}


IOCache::~IOCache()
{
	if (fArea >= 0) {
		vm_page_unreserve_pages(&fMappingReservation);
		delete_area(fArea);
	}

	delete[] fPages;
	delete[] fVecs;

	mutex_destroy(&fSerializationLock);
}


status_t
IOCache::Init(const char* name)
{
	TRACE("%p->IOCache::Init(\"%s\")\n", this, name);

	status_t error = IOScheduler::Init(name);
	if (error != B_OK)
		return error;

	// create the area for mapping cache lines
	fArea = vm_create_null_area(B_SYSTEM_TEAM, "I/O cache line", &fAreaBase,
		B_ANY_KERNEL_ADDRESS, fLineSize, 0);
	if (fArea < 0)
		return fArea;

	// reserve pages for mapping a complete cache line
	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	size_t pagesNeeded = translationMap->MaxPagesNeededToMap((addr_t)fAreaBase,
		(addr_t)fAreaBase + fLineSize - 1);
	vm_page_reserve_pages(&fMappingReservation, pagesNeeded,
		VM_PRIORITY_SYSTEM);

	// get the area's cache
	VMArea* area = VMAreaHash::Lookup(fArea);
	if (area == NULL) {
		panic("IOCache::Init(): Where's our area (id: %" B_PRId32 ")?!", fArea);
		return B_ERROR;
	}
	fCache = area->cache;

	// allocate arrays for pages and iovecs
	fPages = new(std::nothrow) vm_page*[fPagesPerLine];
	fVecs = new(std::nothrow) iovec[fPagesPerLine];
	if (fPages == NULL || fVecs == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


void
IOCache::SetDeviceCapacity(off_t deviceCapacity)
{
	TRACE("%p->IOCache::SetDeviceCapacity(%" B_PRIdOFF ")\n", this,
		deviceCapacity);

	MutexLocker serializationLocker(fSerializationLock);
	AutoLocker<VMCache> cacheLocker(fCache);

	fDeviceCapacity = deviceCapacity;

	// new media -- burn all cached data
	while (vm_page* page = fCache->pages.Root()) {
		DEBUG_PAGE_ACCESS_START(page);
		fCache->RemovePage(page);
		vm_page_free(NULL, page);
	}
}


status_t
IOCache::ScheduleRequest(IORequest* request)
{
	TRACE("%p->IOCache::ScheduleRequest(%p)\n", this, request);

	// lock the request's memory
	status_t error;
	IOBuffer* buffer = request->Buffer();
	if (buffer->IsVirtual()) {
		error = buffer->LockMemory(request->Team(), request->IsWrite());
		if (error != B_OK) {
			request->SetStatusAndNotify(error);
			return error;
		}
	}

	// we completely serialize all I/O in FIFO order
	MutexLocker serializationLocker(fSerializationLock);
	size_t bytesTransferred = 0;
	error = _DoRequest(request, bytesTransferred);
	serializationLocker.Unlock();

	// unlock memory
	if (buffer->IsVirtual())
		buffer->UnlockMemory(request->Team(), request->IsWrite());

	// set status and notify
	if (error == B_OK) {
		request->SetTransferredBytes(bytesTransferred < request->Length(),
			bytesTransferred);
		request->SetStatusAndNotify(B_OK);
	} else
		request->SetStatusAndNotify(error);

	return error;
}


void
IOCache::AbortRequest(IORequest* request, status_t status)
{
	// TODO:...
}


void
IOCache::OperationCompleted(IOOperation* operation, status_t status,
	size_t transferredBytes)
{
	if (status == B_OK) {
		// always fail in case of partial transfers
		((Operation*)operation)->finishedCondition.NotifyAll(false,
			transferredBytes == operation->Length() ? B_OK : B_ERROR);
	} else
		((Operation*)operation)->finishedCondition.NotifyAll(false, status);
}


void
IOCache::Dump() const
{
	kprintf("IOCache at %p\n", this);
	kprintf("  DMA resource:   %p\n", fDMAResource);
}


status_t
IOCache::_DoRequest(IORequest* request, size_t& _bytesTransferred)
{
	off_t offset = request->Offset();
	size_t length = request->Length();

	TRACE("%p->IOCache::_DoRequest(%p): offset: %" B_PRIdOFF
		", length: %" B_PRIuSIZE "\n", this, request, offset, length);

	if (offset < 0 || offset > fDeviceCapacity)
		return B_BAD_VALUE;

	// truncate the request to the device capacity
	if (fDeviceCapacity - offset < length)
		length = fDeviceCapacity - offset;

	_bytesTransferred = 0;

	while (length > 0) {
		// the start of the current cache line
		off_t lineOffset = (offset >> fLineSizeShift) << fLineSizeShift;

		// intersection of request and cache line
		off_t cacheLineEnd = std::min(lineOffset + fLineSize, fDeviceCapacity);
		size_t requestLineLength
			= std::min(cacheLineEnd - offset, (off_t)length);

		// transfer the data of the cache line
		status_t error = _TransferRequestLine(request, lineOffset,
			cacheLineEnd - lineOffset, offset, requestLineLength);
		if (error != B_OK)
			return error;

		offset = cacheLineEnd;
		length -= requestLineLength;
		_bytesTransferred += requestLineLength;
	}

	return B_OK;
}


status_t
IOCache::_TransferRequestLine(IORequest* request, off_t lineOffset,
	size_t lineSize, off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLine(%p, %" B_PRIdOFF
		", %" B_PRIdOFF ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// check which pages of the cache line are already cached and mark them
	// unused
	page_num_t firstPageOffset = lineOffset / B_PAGE_SIZE;
	page_num_t linePageCount = (lineSize + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	AutoLocker<VMCache> cacheLocker(fCache);

	page_num_t firstMissing = 0;
	page_num_t lastMissing = 0;
	page_num_t missingPages = 0;
	page_num_t pageOffset = firstPageOffset;

	VMCachePagesTree::Iterator it = fCache->pages.GetIterator(pageOffset, true,
		true);
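	// The loop below fills fPages[] for the whole line: pages already in the
	// cache are stored and set to the "unused" state, missing slots are set to
	// NULL, and the first/last missing page offsets are recorded for the
	// allocation step below.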
	while (pageOffset < firstPageOffset + linePageCount) {
		vm_page* page = it.Next();
		page_num_t currentPageOffset;
		if (page == NULL
			|| page->cache_offset >= firstPageOffset + linePageCount) {
			page = NULL;
			currentPageOffset = firstPageOffset + linePageCount;
		} else
			currentPageOffset = page->cache_offset;

		if (pageOffset < currentPageOffset) {
			// pages are missing
			if (missingPages == 0)
				firstMissing = pageOffset;
			lastMissing = currentPageOffset - 1;
			missingPages += currentPageOffset - pageOffset;

			for (; pageOffset < currentPageOffset; pageOffset++)
				fPages[pageOffset - firstPageOffset] = NULL;
		}

		if (page != NULL) {
			fPages[pageOffset++ - firstPageOffset] = page;
			DEBUG_PAGE_ACCESS_START(page);
			vm_page_set_state(page, PAGE_STATE_UNUSED);
			DEBUG_PAGE_ACCESS_END(page);
		}
	}

	cacheLocker.Unlock();

	bool isVIP = (request->Flags() & B_VIP_IO_REQUEST) != 0;

	if (missingPages > 0) {
// TODO: If this is a read request and the missing pages range doesn't intersect
// with the request, just satisfy the request and don't read anything at all.
		// There are pages of the cache line missing. We have to allocate fresh
		// ones.

		// reserve
		vm_page_reservation reservation;
		if (!vm_page_try_reserve_pages(&reservation, missingPages,
				VM_PRIORITY_SYSTEM)) {
			_DiscardPages(firstMissing - firstPageOffset, missingPages);

			// fall back to uncached transfer
			return _TransferRequestLineUncached(request, lineOffset,
				requestOffset, requestLength);
		}

		// Allocate the missing pages and remove the already existing pages in
		// the range from the cache. We're going to read/write the whole range
		// anyway and this way we can sort it, possibly improving the physical
		// vecs.
// TODO: When memory is low, we should consider cannibalizing ourselves or
// simply transferring past the cache!
		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			if (fPages[index] == NULL) {
				fPages[index] = vm_page_allocate_page(&reservation,
					PAGE_STATE_UNUSED);
				DEBUG_PAGE_ACCESS_END(fPages[index]);
			} else {
				cacheLocker.Lock();
				fCache->RemovePage(fPages[index]);
				cacheLocker.Unlock();
			}
		}

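		// From here on the complete range from the first to the last missing
		// page is handled as one block; pages in between that already existed
		// were removed from the cache above and are re-added and transferred
		// together with the newly allocated ones.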
		missingPages = lastMissing - firstMissing + 1;

		// sort the page array by physical page number
		std::sort(fPages + firstMissing - firstPageOffset,
			fPages + lastMissing - firstPageOffset + 1,
			page_physical_number_less);

		// add the pages to the cache
		cacheLocker.Lock();

		for (pageOffset = firstMissing; pageOffset <= lastMissing;
				pageOffset++) {
			page_num_t index = pageOffset - firstPageOffset;
			fCache->InsertPage(fPages[index], (off_t)pageOffset * B_PAGE_SIZE);
		}

		cacheLocker.Unlock();

		// Read in the missing pages, if this is a read request or a write
		// request that doesn't cover the complete missing range.
		if (request->IsRead()
			|| requestOffset < (off_t)firstMissing * B_PAGE_SIZE
			|| requestOffset + requestLength
				> (lastMissing + 1) * B_PAGE_SIZE) {
			status_t error = _TransferPages(firstMissing - firstPageOffset,
				missingPages, false, isVIP);
			if (error != B_OK) {
				_DiscardPages(firstMissing - firstPageOffset, missingPages);
				return error;
			}
		}
	}

	if (request->IsRead()) {
		// copy data to request
		status_t error = _CopyPages(request, requestOffset - lineOffset,
			requestOffset, requestLength, true);
		_CachePages(0, linePageCount);
		return error;
	} else {
		// copy data from request
		status_t error = _CopyPages(request, requestOffset - lineOffset,
			requestOffset, requestLength, false);
		if (error != B_OK) {
			_DiscardPages(0, linePageCount);
			return error;
		}

		// write the pages to disk
		page_num_t firstPage = (requestOffset - lineOffset) / B_PAGE_SIZE;
		page_num_t endPage = (requestOffset + requestLength - lineOffset
			+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		error = _TransferPages(firstPage, endPage - firstPage, true, isVIP);

		if (error != B_OK) {
			_DiscardPages(firstPage, endPage - firstPage);
			return error;
		}

		_CachePages(0, linePageCount);
		return error;
	}
}


status_t
IOCache::_TransferRequestLineUncached(IORequest* request, off_t lineOffset,
	off_t requestOffset, size_t requestLength)
{
	TRACE("%p->IOCache::_TransferRequestLineUncached(%p, %" B_PRIdOFF
		", %" B_PRIdOFF ", %" B_PRIuSIZE ")\n", this, request, lineOffset,
		requestOffset, requestLength);

	// Advance the request to the interesting offset, so the DMAResource can
	// provide us with fitting operations.
	off_t actualRequestOffset
		= request->Offset() + request->Length() - request->RemainingBytes();
	if (actualRequestOffset > requestOffset) {
		dprintf("IOCache::_TransferRequestLineUncached(): Request %p advanced "
			"beyond current cache line (%" B_PRIdOFF " vs. %" B_PRIdOFF ")\n",
			request, actualRequestOffset, requestOffset);
		return B_BAD_VALUE;
	}

	if (actualRequestOffset < requestOffset)
		request->Advance(requestOffset - actualRequestOffset);

	size_t requestRemaining = request->RemainingBytes() - requestLength;

	// Process single operations until the specified part of the request is
	// finished or until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request->RemainingBytes() > requestRemaining
		&& request->Status() > 0) {
		status_t error = fDMAResource->TranslateNext(request, &operation,
			request->RemainingBytes() - requestRemaining);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request->OperationFinished(&operation, error, false,
			error == B_OK ? operation.OriginalLength() : 0);
		request->SetUnfinished();
			// Keep the request in unfinished state. ScheduleRequest() will set
			// the final status and notify.

		if (fDMAResource != NULL)
			fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferRequestLineUncached(): operation at "
				"%" B_PRIdOFF " failed: %s\n", this, operation.Offset(),
				strerror(error));
			return error;
		}
	}

	return B_OK;
}


status_t
IOCache::_DoOperation(Operation& operation)
{
	TRACE("%p->IOCache::_DoOperation(%" B_PRIdOFF ", %" B_PRIuSIZE ")\n", this,
		operation.Offset(), operation.Length());

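	// Submit the operation via the I/O callback and wait for it to be
	// finished. If Finish() reports that the operation isn't complete yet,
	// it is submitted again.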
	while (true) {
		ConditionVariableEntry waitEntry;
		operation.finishedCondition.Add(&waitEntry);

		status_t error = fIOCallback(fIOCallbackData, &operation);
		if (error != B_OK) {
			operation.finishedCondition.NotifyAll(false, error);
				// removes the entry from the variable
			return error;
		}

		// wait for the operation to finish
		error = waitEntry.Wait();
		if (error != B_OK)
			return error;

		if (operation.Finish())
			return B_OK;
	}
}


status_t
IOCache::_TransferPages(size_t firstPage, size_t pageCount, bool isWrite,
	bool isVIP)
{
	TRACE("%p->IOCache::_TransferPages(%" B_PRIuSIZE ", %" B_PRIuSIZE
		", write: %d, vip: %d)\n", this, firstPage, pageCount, isWrite, isVIP);

	off_t firstPageOffset = (off_t)fPages[firstPage]->cache_offset
		* B_PAGE_SIZE;
	size_t requestLength = std::min(
			firstPageOffset + (off_t)pageCount * B_PAGE_SIZE, fDeviceCapacity)
		- firstPageOffset;

	// prepare the I/O vecs
	size_t vecCount = 0;
	size_t endPage = firstPage + pageCount;
	addr_t vecsEndAddress = 0;
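	// Build one iovec per run of physically contiguous pages: a page whose
	// physical address directly follows the previous vec's end just extends
	// that vec.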
	for (size_t i = firstPage; i < endPage; i++) {
		addr_t pageAddress = fPages[i]->physical_page_number * B_PAGE_SIZE;
		if (vecCount == 0 || pageAddress != vecsEndAddress) {
			fVecs[vecCount].iov_base = (void*)pageAddress;
			fVecs[vecCount++].iov_len = B_PAGE_SIZE;
			vecsEndAddress = pageAddress + B_PAGE_SIZE;
		} else {
			// extend the previous vec
			fVecs[vecCount - 1].iov_len += B_PAGE_SIZE;
			vecsEndAddress += B_PAGE_SIZE;
		}
	}

	// create a request for the transfer
	IORequest request;
	status_t error = request.Init(firstPageOffset, fVecs, vecCount,
		requestLength, isWrite,
		B_PHYSICAL_IO_REQUEST | (isVIP ? B_VIP_IO_REQUEST : 0));
	if (error != B_OK)
		return error;

	// Process single operations until the complete request is finished or
	// until an error occurs.
	Operation operation;
	operation.finishedCondition.Init(this, "I/O cache operation finished");

	while (request.RemainingBytes() > 0 && request.Status() > 0) {
		error = fDMAResource->TranslateNext(&request, &operation,
			requestLength);
		if (error != B_OK)
			return error;

		error = _DoOperation(operation);

		request.RemoveOperation(&operation);

		if (fDMAResource != NULL)
			fDMAResource->RecycleBuffer(operation.Buffer());

		if (error != B_OK) {
			TRACE("%p->IOCache::_TransferPages(): operation at %" B_PRIdOFF
				" failed: %s\n", this, operation.Offset(), strerror(error));
			return error;
		}
	}

	return B_OK;
}


/*!	Frees all pages in the given range of the \c fPages array.
	\c NULL entries in the range are OK. All non-\c NULL entries must refer
	to pages with \c PAGE_STATE_UNUSED. The pages may belong to \c fCache or
	may not have a cache.
	\c fCache must not be locked.
*/
void
IOCache::_DiscardPages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_DiscardPages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		if (page == NULL)
			continue;

		DEBUG_PAGE_ACCESS_START(page);

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		if (page->Cache() != NULL)
			fCache->RemovePage(page);

		vm_page_free(NULL, page);
	}
}


/*!	Marks all pages in the given range of the \c fPages array cached.
	There must not be any \c NULL entries in the given array range. All pages
	must belong to \c fCache and have state \c PAGE_STATE_UNUSED.
	\c fCache must not be locked.
*/
void
IOCache::_CachePages(size_t firstPage, size_t pageCount)
{
	TRACE("%p->IOCache::_CachePages(%" B_PRIuSIZE ", %" B_PRIuSIZE ")\n",
		this, firstPage, pageCount);

	AutoLocker<VMCache> cacheLocker(fCache);

	for (size_t i = firstPage; i < firstPage + pageCount; i++) {
		vm_page* page = fPages[i];
		ASSERT(page != NULL);
		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED
				&& page->Cache() == fCache,
			"page: %p @! page -m %p", page, page);

		DEBUG_PAGE_ACCESS_START(page);
		vm_page_set_state(page, PAGE_STATE_CACHED);
		DEBUG_PAGE_ACCESS_END(page);
	}
}


/*!	Copies the contents of pages in \c fPages to \a request, or vice versa.
	\param request The request.
	\param pagesRelativeOffset The offset relative to \c fPages[0] where to
		start copying.
	\param requestOffset The request offset where to start copying.
	\param requestLength The number of bytes to copy.
	\param toRequest If \c true the copy direction is from \c fPages to
		\a request, otherwise the other way around.
	\return \c B_OK, if copying went fine, another error code otherwise.
*/
status_t
IOCache::_CopyPages(IORequest* request, size_t pagesRelativeOffset,
	off_t requestOffset, size_t requestLength, bool toRequest)
{
	TRACE("%p->IOCache::_CopyPages(%p, %" B_PRIuSIZE ", %" B_PRIdOFF
		", %" B_PRIuSIZE ", %d)\n", this, request, pagesRelativeOffset,
		requestOffset, requestLength, toRequest);

	size_t firstPage = pagesRelativeOffset / B_PAGE_SIZE;
	size_t endPage = (pagesRelativeOffset + requestLength + B_PAGE_SIZE - 1)
		/ B_PAGE_SIZE;

	// map the pages
	status_t error = _MapPages(firstPage, endPage);
// TODO: _MapPages() cannot fail, so the fallback is never needed. Test which
// method is faster (probably the active one)!
#if 0
	if (error != B_OK) {
		// fallback to copying individual pages
		size_t inPageOffset = pagesRelativeOffset % B_PAGE_SIZE;
		for (size_t i = firstPage; i < endPage; i++) {
			// map the page
			void* handle;
			addr_t address;
			error = vm_get_physical_page(
				fPages[i]->physical_page_number * B_PAGE_SIZE, &address,
				&handle);
			if (error != B_OK)
				return error;

			// copy the page's data
			size_t toCopy = std::min(B_PAGE_SIZE - inPageOffset, requestLength);

			if (toRequest) {
				error = request->CopyData((uint8*)(address + inPageOffset),
					requestOffset, toCopy);
			} else {
				error = request->CopyData(requestOffset,
					(uint8*)(address + inPageOffset), toCopy);
			}

			// unmap the page
			vm_put_physical_page(address, handle);

			if (error != B_OK)
				return error;

			inPageOffset = 0;
			requestOffset += toCopy;
			requestLength -= toCopy;
		}

		return B_OK;
	}
#endif	// 0

	// copy
	if (toRequest) {
		error = request->CopyData((uint8*)fAreaBase + pagesRelativeOffset,
			requestOffset, requestLength);
	} else {
		error = request->CopyData(requestOffset,
			(uint8*)fAreaBase + pagesRelativeOffset, requestLength);
	}

	// unmap the pages
	_UnmapPages(firstPage, endPage);

	return error;
}


/*!	Maps a range of pages in \c fPages into \c fArea.

	If successful, it must be balanced by a call to _UnmapPages().

	\param firstPage The \c fPages relative index of the first page to map.
	\param endPage The \c fPages relative index of the page after the last page
		to map.
	\return \c B_OK, if mapping went fine, another error code otherwise.
*/
status_t
IOCache::_MapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	for (size_t i = firstPage; i < endPage; i++) {
		vm_page* page = fPages[i];

		ASSERT_PRINT(page->State() == PAGE_STATE_UNUSED,
			"page: %p @! page -m %p", page, page);

		translationMap->Map((addr_t)fAreaBase + i * B_PAGE_SIZE,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &fMappingReservation);
		// NOTE: We don't increment gMappedPagesCount. Our pages have state
		// PAGE_STATE_UNUSED anyway and we map them only for a short time.
	}

	translationMap->Unlock();

	return B_OK;
}


/*!	Unmaps a range of pages in \c fPages from \c fArea.

	Must balance a call to _MapPages().

	\param firstPage The \c fPages relative index of the first page to unmap.
	\param endPage The \c fPages relative index of the page after the last page
		to unmap.
*/
void
IOCache::_UnmapPages(size_t firstPage, size_t endPage)
{
	VMTranslationMap* translationMap
		= VMAddressSpace::Kernel()->TranslationMap();

	translationMap->Lock();

	translationMap->Unmap((addr_t)fAreaBase + firstPage * B_PAGE_SIZE,
		(addr_t)fAreaBase + endPage * B_PAGE_SIZE - 1);

	translationMap->Unlock();
}
762