xref: /haiku/src/system/kernel/device_manager/IORequest.cpp (revision 4e151bc3093293a8ab47aeae854dc80d04e9b41f)
1 /*
2  * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2008-2017, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  */
6 
7 
8 #include "IORequest.h"
9 
10 #include <string.h>
11 
12 #include <arch/debug.h>
13 #include <debug.h>
14 #include <heap.h>
15 #include <kernel.h>
16 #include <team.h>
17 #include <thread.h>
18 #include <util/AutoLock.h>
19 #include <vm/vm.h>
20 #include <vm/VMAddressSpace.h>
21 
22 #include "dma_resources.h"
23 
24 
25 //#define TRACE_IO_REQUEST
26 #ifdef TRACE_IO_REQUEST
27 #	define TRACE(x...) dprintf(x)
28 #else
29 #	define TRACE(x...) ;
30 #endif
31 
32 
33 // partial I/O operation phases
34 enum {
35 	PHASE_READ_BEGIN	= 0,
36 	PHASE_READ_END		= 1,
37 	PHASE_DO_ALL		= 2
38 };
39 
40 
41 struct virtual_vec_cookie {
42 	uint32			vec_index;
43 	generic_size_t	vec_offset;
44 	area_id			mapped_area;
45 	void*			physical_page_handle;
46 	addr_t			virtual_address;
47 
48 	virtual_vec_cookie()
49 		:
50 		vec_index(0),
51 		vec_offset(0),
52 		mapped_area(-1),
53 		physical_page_handle(NULL),
54 		virtual_address((addr_t)-1)
55 	{
56 	}
57 
58 	void PutPhysicalPageIfNeeded()
59 	{
60 		if (virtual_address != (addr_t)-1) {
61 			vm_put_physical_page(virtual_address, physical_page_handle);
62 			virtual_address = (addr_t)-1;
63 		}
64 	}
65 };
66 
67 
68 // #pragma mark -
69 
70 
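/*!	Note: throughout this file a status of 1 means that the chunk has not
	finished yet; it is replaced by B_OK or an error code on completion.
*/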
71 IORequestChunk::IORequestChunk()
72 	:
73 	fParent(NULL),
74 	fStatus(1)
75 {
76 }
77 
78 
79 IORequestChunk::~IORequestChunk()
80 {
81 }
82 
83 
84 //	#pragma mark -
85 
86 
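/*!	Allocates an IOBuffer with room for \a count vecs stored directly behind
	the structure. If \a vip is \c true, the VIP heap is used; Delete() frees
	the buffer from the matching heap again.
*/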
87 IOBuffer*
88 IOBuffer::Create(uint32 count, bool vip)
89 {
90 	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
91 	IOBuffer* buffer
92 		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
93 	if (buffer == NULL)
94 		return NULL;
95 
96 	buffer->fCapacity = count;
97 	buffer->fVecCount = 0;
98 	buffer->fUser = false;
99 	buffer->fPhysical = false;
100 	buffer->fVIP = vip;
101 	buffer->fMemoryLocked = false;
102 
103 	return buffer;
104 }
105 
106 
107 void
108 IOBuffer::Delete()
109 {
110 	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
111 }
112 
113 
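/*!	Copies the given vecs into the buffer and trims them to the actual I/O
	range: the first vec is advanced by \a firstVecOffset and, if non-zero,
	\a lastVecSize replaces the length of the last vec. Also determines
	whether the buffer refers to physical memory and whether it originates
	from userland.
*/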
114 void
115 IOBuffer::SetVecs(generic_size_t firstVecOffset, generic_size_t lastVecSize,
116 	const generic_io_vec* vecs, uint32 count, generic_size_t length, uint32 flags)
117 {
118 	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);
119 
120 	if (count > 0 && firstVecOffset > 0) {
121 		fVecs[0].base += firstVecOffset;
122 		fVecs[0].length -= firstVecOffset;
123 	}
124 	if (lastVecSize > 0)
125 		fVecs[count - 1].length = lastVecSize;
126 
127 	fVecCount = count;
128 	fLength = length;
129 	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
130 	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
131 
132 #if KDEBUG
133 	generic_size_t actualLength = 0;
134 	for (size_t i = 0; i < fVecCount; i++)
135 		actualLength += fVecs[i].length;
136 
137 	ASSERT(actualLength == fLength);
138 #endif
139 }
140 
141 
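/*!	Returns the next part of the buffer as a virtual address in \a vector.
	Virtual vecs are returned as-is; physical vecs are mapped first --
	preferably all at once into a temporary area, otherwise one page at a
	time. The cookie must eventually be released with FreeVirtualVecCookie().
*/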
142 status_t
143 IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
144 {
145 	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
146 	if (cookie == NULL) {
147 		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
148 			virtual_vec_cookie;
149 		if (cookie == NULL)
150 			return B_NO_MEMORY;
151 
152 		_cookie = cookie;
153 	}
154 
155 	// put back a page that was mapped in a previous iteration, if any
156 	cookie->PutPhysicalPageIfNeeded();
157 
158 	if (cookie->vec_index >= fVecCount)
159 		return B_BAD_INDEX;
160 
161 	if (!fPhysical) {
162 		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
163 		vector.iov_len = fVecs[cookie->vec_index++].length;
164 		return B_OK;
165 	}
166 
167 	if (cookie->vec_index == 0
168 		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
169 		void* mappedAddress;
170 		addr_t mappedSize;
171 
172 // TODO: This is a potential violation of the VIP requirement, since
173 // vm_map_physical_memory_vecs() allocates memory without special flags!
174 		cookie->mapped_area = vm_map_physical_memory_vecs(
175 			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
176 			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
177 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);
178 
179 		if (cookie->mapped_area >= 0) {
180 			vector.iov_base = mappedAddress;
181 			vector.iov_len = mappedSize;
182 			return B_OK;
183 		} else
184 			ktrace_printf("failed to map area: %s\n", strerror(cookie->mapped_area));
185 	}
186 
187 	// fall back to page-wise mapping
188 	generic_io_vec& currentVec = fVecs[cookie->vec_index];
189 	generic_addr_t address = currentVec.base + cookie->vec_offset;
190 	size_t pageOffset = address % B_PAGE_SIZE;
191 
192 // TODO: This is a potential violation of the VIP requirement, since
193 // vm_get_physical_page() may allocate memory without special flags!
194 	status_t result = vm_get_physical_page(address - pageOffset,
195 		&cookie->virtual_address, &cookie->physical_page_handle);
196 	if (result != B_OK)
197 		return result;
198 
199 	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
200 		B_PAGE_SIZE - pageOffset);
201 
202 	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
203 	vector.iov_len = length;
204 
205 	cookie->vec_offset += length;
206 	if (cookie->vec_offset >= currentVec.length) {
207 		cookie->vec_index++;
208 		cookie->vec_offset = 0;
209 	}
210 
211 	return B_OK;
212 }
213 
214 
215 void
216 IOBuffer::FreeVirtualVecCookie(void* _cookie)
217 {
218 	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
219 	if (cookie->mapped_area >= 0)
220 		delete_area(cookie->mapped_area);
221 
222 	cookie->PutPhysicalPageIfNeeded();
223 
224 	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
225 }
226 
227 
228 status_t
229 IOBuffer::LockMemory(team_id team, bool isWrite)
230 {
231 	if (fMemoryLocked) {
232 		panic("memory already locked!");
233 		return B_BAD_VALUE;
234 	}
235 
236 	for (uint32 i = 0; i < fVecCount; i++) {
237 		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
238 			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
239 		if (status != B_OK) {
240 			_UnlockMemory(team, i, isWrite);
241 			return status;
242 		}
243 	}
244 
245 	fMemoryLocked = true;
246 	return B_OK;
247 }
248 
249 
250 void
251 IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
252 {
253 	for (uint32 i = 0; i < count; i++) {
254 		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
255 			isWrite ? 0 : B_READ_DEVICE);
256 	}
257 }
258 
259 
260 void
261 IOBuffer::UnlockMemory(team_id team, bool isWrite)
262 {
263 	if (!fMemoryLocked) {
264 		panic("memory not locked");
265 		return;
266 	}
267 
268 	_UnlockMemory(team, fVecCount, isWrite);
269 	fMemoryLocked = false;
270 }
271 
272 
273 void
274 IOBuffer::Dump() const
275 {
276 	kprintf("IOBuffer at %p\n", this);
277 
278 	kprintf("  origin:     %s\n", fUser ? "user" : "kernel");
279 	kprintf("  kind:       %s\n", fPhysical ? "physical" : "virtual");
280 	kprintf("  length:     %" B_PRIuGENADDR "\n", fLength);
281 	kprintf("  capacity:   %" B_PRIuSIZE "\n", fCapacity);
282 	kprintf("  vecs:       %" B_PRIuSIZE "\n", fVecCount);
283 
284 	for (uint32 i = 0; i < fVecCount; i++) {
285 		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
286 			i, fVecs[i].base, fVecs[i].length);
287 	}
288 }
289 
290 
291 // #pragma mark -
292 
293 
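/*!	To be called when the current transfer of the operation has completed.
	Returns \c true if the operation is done, \c false if another transfer
	has to be started first: partial writes need to read in the blocks
	containing the partial begin/end before the actual write, so up to three
	phases are executed. For reads using a bounce buffer the data is copied
	back to the request's buffer here.
*/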
294 bool
295 IOOperation::Finish()
296 {
297 	TRACE("IOOperation::Finish()\n");
298 	if (fStatus == B_OK) {
299 		if (fParent->IsWrite()) {
300 			TRACE("  is write\n");
301 			if (fPhase == PHASE_READ_BEGIN) {
302 				TRACE("  phase read begin\n");
303 				// repair the phase-adjusted vec
304 				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;
305 
306 				// partial write: copy partial begin to bounce buffer
307 				bool skipReadEndPhase;
308 				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
309 				if (error == B_OK) {
310 					// We're done with the first phase only (read in begin).
311 					// Get ready for next phase...
312 					fPhase = HasPartialEnd() && !skipReadEndPhase
313 						? PHASE_READ_END : PHASE_DO_ALL;
314 					_PrepareVecs();
315 					ResetStatus();
316 						// TODO: Is there a race condition, if the request is
317 						// aborted at the same time?
318 					return false;
319 				}
320 
321 				SetStatus(error);
322 			} else if (fPhase == PHASE_READ_END) {
323 				TRACE("  phase read end\n");
324 				// repair the phase-adjusted vec
325 				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
326 				vec.base += vec.length - fSavedVecLength;
327 				vec.length = fSavedVecLength;
328 
329 				// partial write: copy partial end to bounce buffer
330 				status_t error = _CopyPartialEnd(true);
331 				if (error == B_OK) {
332 					// We're done with the second phase only (read in end).
333 					// Get ready for next phase...
334 					fPhase = PHASE_DO_ALL;
335 					ResetStatus();
336 						// TODO: Is there a race condition, if the request is
337 						// aborted at the same time?
338 					return false;
339 				}
340 
341 				SetStatus(error);
342 			}
343 		}
344 	}
345 
346 	if (fParent->IsRead() && UsesBounceBuffer()) {
347 		TRACE("  read with bounce buffer\n");
348 		// copy the bounce buffer segments to the final location
349 		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
350 		phys_addr_t bounceBufferStart
351 			= fDMABuffer->PhysicalBounceBufferAddress();
352 		phys_addr_t bounceBufferEnd = bounceBufferStart
353 			+ fDMABuffer->BounceBufferSize();
354 
355 		const generic_io_vec* vecs = fDMABuffer->Vecs();
356 		uint32 vecCount = fDMABuffer->VecCount();
357 
358 		status_t error = B_OK;
359 
360 		// We iterate through the vecs we have read, moving offset (the device
361 		// offset) as we go. If [offset, offset + vec.length) intersects with
362 		// [startOffset, endOffset) we copy to the final location.
363 		off_t offset = fOffset;
364 		const off_t startOffset = fOriginalOffset;
365 		const off_t endOffset = fOriginalOffset + fOriginalLength;
366 
367 		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
368 			const generic_io_vec& vec = vecs[i];
369 			generic_addr_t base = vec.base;
370 			generic_size_t length = vec.length;
371 
372 			if (offset < startOffset) {
373 				// If the complete vector is before the start offset, skip it.
374 				if (offset + (off_t)length <= startOffset) {
375 					offset += length;
376 					continue;
377 				}
378 
379 				// The vector starts before the start offset, but intersects
380 				// with it. Skip the part we aren't interested in.
381 				generic_size_t diff = startOffset - offset;
382 				offset += diff;
383 				base += diff;
384 				length -= diff;
385 			}
386 
387 			if (offset + (off_t)length > endOffset) {
388 				// If we're already beyond the end offset, we're done.
389 				if (offset >= endOffset)
390 					break;
391 
392 				// The vector extends beyond the end offset -- cut it.
393 				length = endOffset - offset;
394 			}
395 
396 			if (base >= bounceBufferStart && base < bounceBufferEnd) {
397 				error = fParent->CopyData(
398 					bounceBuffer + (base - bounceBufferStart), offset, length);
399 			}
400 
401 			offset += length;
402 		}
403 
404 		if (error != B_OK)
405 			SetStatus(error);
406 	}
407 
408 	return true;
409 }
410 
411 
412 /*!	Note: SetPartial() must be called first!
413 */
414 status_t
415 IOOperation::Prepare(IORequest* request)
416 {
417 	if (fParent != NULL)
418 		fParent->RemoveOperation(this);
419 
420 	fParent = request;
421 
422 	fTransferredBytes = 0;
423 
424 	// set initial phase
425 	fPhase = PHASE_DO_ALL;
426 	if (fParent->IsWrite()) {
427 		// Copy data to the bounce buffer segments, except for the partial
428 		// begin/end blocks, which are copied after their read phases.
429 		if (UsesBounceBuffer()) {
430 			TRACE("  write with bounce buffer\n");
431 			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
432 			phys_addr_t bounceBufferStart
433 				= fDMABuffer->PhysicalBounceBufferAddress();
434 			phys_addr_t bounceBufferEnd = bounceBufferStart
435 				+ fDMABuffer->BounceBufferSize();
436 
437 			const generic_io_vec* vecs = fDMABuffer->Vecs();
438 			uint32 vecCount = fDMABuffer->VecCount();
439 			generic_size_t vecOffset = 0;
440 			uint32 i = 0;
441 
442 			off_t offset = fOffset;
443 			off_t endOffset = fOffset + fLength;
444 
445 			if (HasPartialBegin()) {
446 				// skip first block
447 				generic_size_t toSkip = fBlockSize;
448 				while (toSkip > 0) {
449 					if (vecs[i].length <= toSkip) {
450 						toSkip -= vecs[i].length;
451 						i++;
452 					} else {
453 						vecOffset = toSkip;
454 						break;
455 					}
456 				}
457 
458 				offset += fBlockSize;
459 			}
460 
461 			if (HasPartialEnd()) {
462 				// skip last block
463 				generic_size_t toSkip = fBlockSize;
464 				while (toSkip > 0) {
465 					if (vecs[vecCount - 1].length <= toSkip) {
466 						toSkip -= vecs[vecCount - 1].length;
467 						vecCount--;
468 					} else
469 						break;
470 				}
471 
472 				endOffset -= fBlockSize;
473 			}
474 
475 			for (; i < vecCount; i++) {
476 				const generic_io_vec& vec = vecs[i];
477 				generic_addr_t base = vec.base + vecOffset;
478 				generic_size_t length = vec.length - vecOffset;
479 				vecOffset = 0;
480 
481 				if (base >= bounceBufferStart && base < bounceBufferEnd) {
482 					if (offset + (off_t)length > endOffset)
483 						length = endOffset - offset;
484 					status_t error = fParent->CopyData(offset,
485 						bounceBuffer + (base - bounceBufferStart), length);
486 					if (error != B_OK)
487 						return error;
488 				}
489 
490 				offset += length;
491 			}
492 		}
493 
494 		if (HasPartialBegin())
495 			fPhase = PHASE_READ_BEGIN;
496 		else if (HasPartialEnd())
497 			fPhase = PHASE_READ_END;
498 
499 		_PrepareVecs();
500 	}
501 
502 	ResetStatus();
503 
504 	if (fParent != NULL)
505 		fParent->AddOperation(this);
506 
507 	return B_OK;
508 }
509 
510 
511 void
512 IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
513 {
514 	fOriginalOffset = fOffset = offset;
515 	fOriginalLength = fLength = length;
516 }
517 
518 
519 void
520 IOOperation::SetRange(off_t offset, generic_size_t length)
521 {
522 	fOffset = offset;
523 	fLength = length;
524 }
525 
526 
527 off_t
528 IOOperation::Offset() const
529 {
530 	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
531 }
532 
533 
534 generic_size_t
535 IOOperation::Length() const
536 {
537 	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
538 }
539 
540 
541 generic_io_vec*
542 IOOperation::Vecs() const
543 {
544 	switch (fPhase) {
545 		case PHASE_READ_END:
546 			return fDMABuffer->Vecs() + fSavedVecIndex;
547 		case PHASE_READ_BEGIN:
548 		case PHASE_DO_ALL:
549 		default:
550 			return fDMABuffer->Vecs();
551 	}
552 }
553 
554 
555 uint32
556 IOOperation::VecCount() const
557 {
558 	switch (fPhase) {
559 		case PHASE_READ_BEGIN:
560 			return fSavedVecIndex + 1;
561 		case PHASE_READ_END:
562 			return fDMABuffer->VecCount() - fSavedVecIndex;
563 		case PHASE_DO_ALL:
564 		default:
565 			return fDMABuffer->VecCount();
566 	}
567 }
568 
569 
570 void
571 IOOperation::SetPartial(bool partialBegin, bool partialEnd)
572 {
573 	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
574 	fPartialBegin = partialBegin;
575 	fPartialEnd = partialEnd;
576 }
577 
578 
579 bool
580 IOOperation::IsWrite() const
581 {
582 	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
583 }
584 
585 
586 bool
587 IOOperation::IsRead() const
588 {
589 	return fParent->IsRead();
590 }
591 
592 
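/*!	Restricts the DMA vecs to the single block that is transferred during a
	partial phase: the first \c fBlockSize bytes for PHASE_READ_BEGIN, the
	last \c fBlockSize bytes for PHASE_READ_END. The index and original
	length of the adjusted vec are saved so that Finish() can restore it.
*/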
593 void
594 IOOperation::_PrepareVecs()
595 {
596 	// we need to prepare the vecs for consumption by the drivers
597 	if (fPhase == PHASE_READ_BEGIN) {
598 		generic_io_vec* vecs = fDMABuffer->Vecs();
599 		uint32 vecCount = fDMABuffer->VecCount();
600 		generic_size_t vecLength = fBlockSize;
601 		for (uint32 i = 0; i < vecCount; i++) {
602 			generic_io_vec& vec = vecs[i];
603 			if (vec.length >= vecLength) {
604 				fSavedVecIndex = i;
605 				fSavedVecLength = vec.length;
606 				vec.length = vecLength;
607 				break;
608 			}
609 			vecLength -= vec.length;
610 		}
611 	} else if (fPhase == PHASE_READ_END) {
612 		generic_io_vec* vecs = fDMABuffer->Vecs();
613 		uint32 vecCount = fDMABuffer->VecCount();
614 		generic_size_t vecLength = fBlockSize;
615 		for (int32 i = vecCount - 1; i >= 0; i--) {
616 			generic_io_vec& vec = vecs[i];
617 			if (vec.length >= vecLength) {
618 				fSavedVecIndex = i;
619 				fSavedVecLength = vec.length;
620 				vec.base += vec.length - vecLength;
621 				vec.length = vecLength;
622 				break;
623 			}
624 			vecLength -= vec.length;
625 		}
626 	}
627 }
628 
629 
630 status_t
631 IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
632 {
633 	generic_size_t relativeOffset = OriginalOffset() - fOffset;
634 	generic_size_t length = fBlockSize - relativeOffset;
635 
636 	singleBlockOnly = length >= OriginalLength();
637 	if (singleBlockOnly)
638 		length = OriginalLength();
639 
640 	TRACE("_CopyPartialBegin(%s, single only %d)\n",
641 		isWrite ? "write" : "read", singleBlockOnly);
642 
643 	if (isWrite) {
644 		return fParent->CopyData(OriginalOffset(),
645 			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
646 	} else {
647 		return fParent->CopyData(
648 			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
649 			OriginalOffset(), length);
650 	}
651 }
652 
653 
654 status_t
655 IOOperation::_CopyPartialEnd(bool isWrite)
656 {
657 	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");
658 
659 	const generic_io_vec& lastVec
660 		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
661 	off_t lastVecPos = fOffset + fLength - fBlockSize;
662 	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
663 		+ (lastVec.base + lastVec.length - fBlockSize
664 		- fDMABuffer->PhysicalBounceBufferAddress());
665 		// NOTE: this won't work if we don't use the bounce buffer contiguously
666 		// (because of boundary alignments).
667 	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;
668 
669 	if (isWrite)
670 		return fParent->CopyData(lastVecPos, base, length);
671 
672 	return fParent->CopyData(base, lastVecPos, length);
673 }
674 
675 
676 void
677 IOOperation::Dump() const
678 {
679 	kprintf("io_operation at %p\n", this);
680 
681 	kprintf("  parent:           %p\n", fParent);
682 	kprintf("  status:           %s\n", strerror(fStatus));
683 	kprintf("  dma buffer:       %p\n", fDMABuffer);
684 	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
685 		fOffset, fOriginalOffset);
686 	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
687 		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
688 	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
689 	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
690 	kprintf("  saved vec index:  %u\n", fSavedVecIndex);
691 	kprintf("  saved vec length: %u\n", fSavedVecLength);
692 	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
693 	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
694 		? "read begin" : fPhase == PHASE_READ_END ? "read end"
695 		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
696 	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
697 	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
698 	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");
699 
700 	set_debug_variable("_parent", (addr_t)fParent);
701 	set_debug_variable("_buffer", (addr_t)fDMABuffer);
702 }
703 
704 
705 // #pragma mark -
706 
707 
708 IORequest::IORequest()
709 	:
710 	fIsNotified(false),
711 	fFinishedCallback(NULL),
712 	fFinishedCookie(NULL),
713 	fIterationCallback(NULL),
714 	fIterationCookie(NULL)
715 {
716 	mutex_init(&fLock, "I/O request lock");
717 	fFinishedCondition.Init(this, "I/O request finished");
718 }
719 
720 
721 IORequest::~IORequest()
722 {
723 	mutex_lock(&fLock);
724 	DeleteSubRequests();
725 	if (fBuffer != NULL)
726 		fBuffer->Delete();
727 	mutex_destroy(&fLock);
728 }
729 
730 
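/*!	Allocates a new IORequest, using the VIP heap if \a vip is \c true. The
	request still needs to be initialized via Init() before use. A rough
	usage sketch (most callers actually pass the request on to an I/O
	scheduler or driver instead of transferring the data themselves):

		IORequest* request = IORequest::Create(false);
		if (request != NULL && request->Init(offset,
				(generic_addr_t)buffer, length, false, 0) == B_OK) {
			// ... hand the request over for execution ...
			status_t status = request->Wait(0, B_INFINITE_TIMEOUT);
			// ... delete the request when done with it ...
		}
*/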
731 /* static */ IORequest*
732 IORequest::Create(bool vip)
733 {
734 	return vip
735 		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
736 		: new(std::nothrow) IORequest;
737 }
738 
739 
740 status_t
741 IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
742 	bool write, uint32 flags)
743 {
744 	ASSERT(offset >= 0);
745 
746 	generic_io_vec vec;
747 	vec.base = buffer;
748 	vec.length = length;
749 	return Init(offset, &vec, 1, length, write, flags);
750 }
751 
752 
753 status_t
754 IORequest::Init(off_t offset, generic_size_t firstVecOffset,
755 	generic_size_t lastVecSize, const generic_io_vec* vecs, size_t count,
756 	generic_size_t length, bool write, uint32 flags)
757 {
758 	ASSERT(offset >= 0);
759 
760 	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
761 	if (fBuffer == NULL)
762 		return B_NO_MEMORY;
763 
764 	fBuffer->SetVecs(firstVecOffset, lastVecSize, vecs, count, length, flags);
765 
766 	fOwner = NULL;
767 	fOffset = offset;
768 	fLength = length;
769 	fRelativeParentOffset = 0;
770 	fTransferSize = 0;
771 	fFlags = flags;
772 	Thread* thread = thread_get_current_thread();
773 	fTeam = thread->team->id;
774 	fThread = thread->id;
775 	fIsWrite = write;
776 	fPartialTransfer = false;
777 	fSuppressChildNotifications = false;
778 
779 	// these are for iteration
780 	fVecIndex = 0;
781 	fVecOffset = 0;
782 	fRemainingBytes = length;
783 
784 	fPendingChildren = 0;
785 
786 	fStatus = 1;
787 
788 	return B_OK;
789 }
790 
791 
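/*!	Creates a sub-request that covers part of this request's buffer.
	\a parentOffset is an offset into this request's I/O range (same
	coordinate space as fOffset), while \a offset is the offset the
	sub-request itself will operate on. The sub-request gets its own copy of
	the relevant vecs (referring to the same memory) and is registered as a
	pending child of this request.
*/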
792 status_t
793 IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
794 	generic_size_t length, IORequest*& _subRequest)
795 {
796 	ASSERT(parentOffset >= fOffset && length <= fLength
797 		&& parentOffset - fOffset <= (off_t)(fLength - length));
798 
799 	// find start vec
800 	generic_size_t vecOffset = parentOffset - fOffset;
801 	generic_io_vec* vecs = fBuffer->Vecs();
802 	int32 vecCount = fBuffer->VecCount();
803 	int32 startVec = 0;
804 	for (; startVec < vecCount; startVec++) {
805 		const generic_io_vec& vec = vecs[startVec];
806 		if (vecOffset < vec.length)
807 			break;
808 
809 		vecOffset -= vec.length;
810 	}
811 
812 	// count vecs
813 	generic_size_t currentVecOffset = vecOffset;
814 	int32 endVec = startVec;
815 	generic_size_t remainingLength = length;
816 	for (; endVec < vecCount; endVec++) {
817 		const generic_io_vec& vec = vecs[endVec];
818 		if (vec.length - currentVecOffset >= remainingLength)
819 			break;
820 
821 		remainingLength -= vec.length - currentVecOffset;
822 		currentVecOffset = 0;
823 	}
824 
825 	// create subrequest
826 	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
827 	if (subRequest == NULL)
828 		return B_NO_MEMORY;
829 
830 	status_t error = subRequest->Init(offset, vecOffset, remainingLength,
831 		vecs + startVec, endVec - startVec + 1, length, fIsWrite,
832 		fFlags & ~B_DELETE_IO_REQUEST);
833 	if (error != B_OK) {
834 		delete subRequest;
835 		return error;
836 	}
837 
838 	subRequest->fRelativeParentOffset = parentOffset - fOffset;
839 	subRequest->fTeam = fTeam;
840 	subRequest->fThread = fThread;
841 
842 	_subRequest = subRequest;
843 	subRequest->SetParent(this);
844 
845 	MutexLocker _(fLock);
846 
847 	fChildren.Add(subRequest);
848 	fPendingChildren++;
849 	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
850 		subRequest);
851 
852 	return B_OK;
853 }
854 
855 
856 void
857 IORequest::DeleteSubRequests()
858 {
859 	while (IORequestChunk* chunk = fChildren.RemoveHead())
860 		delete chunk;
861 	fPendingChildren = 0;
862 }
863 
864 
865 void
866 IORequest::SetFinishedCallback(io_request_finished_callback callback,
867 	void* cookie)
868 {
869 	fFinishedCallback = callback;
870 	fFinishedCookie = cookie;
871 }
872 
873 
874 void
875 IORequest::SetIterationCallback(io_request_iterate_callback callback,
876 	void* cookie)
877 {
878 	fIterationCallback = callback;
879 	fIterationCookie = cookie;
880 }
881 
882 
883 io_request_finished_callback
884 IORequest::FinishedCallback(void** _cookie) const
885 {
886 	if (_cookie != NULL)
887 		*_cookie = fFinishedCookie;
888 	return fFinishedCallback;
889 }
890 
891 
892 status_t
893 IORequest::Wait(uint32 flags, bigtime_t timeout)
894 {
895 	MutexLocker locker(fLock);
896 
897 	if (IsFinished() && fIsNotified)
898 		return Status();
899 
900 	ConditionVariableEntry entry;
901 	fFinishedCondition.Add(&entry);
902 
903 	locker.Unlock();
904 
905 	status_t error = entry.Wait(flags, timeout);
906 	if (error != B_OK)
907 		return error;
908 
909 	return Status();
910 }
911 
912 
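/*!	To be called when the request has completed (successfully or not). If
	bytes are still remaining and an iteration callback is installed, that
	callback first gets a chance to continue the transfer. Otherwise the
	buffer memory is unlocked (if locked), waiters are woken up, the finished
	callback and the parent request are notified, and -- if
	B_DELETE_IO_REQUEST is set -- the request deletes itself.
*/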
913 void
914 IORequest::NotifyFinished()
915 {
916 	TRACE("IORequest::NotifyFinished(): request: %p\n", this);
917 
918 	MutexLocker locker(fLock);
919 
920 	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
921 		// The request is not really done yet. If it has an iteration callback,
922 		// call it.
923 		if (fIterationCallback != NULL) {
924 			ResetStatus();
925 			locker.Unlock();
926 			bool partialTransfer = false;
927 			status_t error = fIterationCallback(fIterationCookie, this,
928 				&partialTransfer);
929 			if (error == B_OK && !partialTransfer)
930 				return;
931 
932 			// Iteration failed, which means we're responsible for notifying
933 			// that the request is finished.
934 			locker.Lock();
935 			fStatus = error;
936 			fPartialTransfer = true;
937 		}
938 	}
939 
940 	ASSERT(!fIsNotified);
941 	ASSERT(fPendingChildren == 0);
942 	ASSERT(fChildren.IsEmpty()
943 		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);
944 
945 	// unlock the memory
946 	if (fBuffer->IsMemoryLocked())
947 		fBuffer->UnlockMemory(fTeam, fIsWrite);
948 
949 	// Cache the callbacks before we unblock waiters and unlock. Any of the
950 	// following could delete this request, so we don't want to touch it
951 	// once we have started telling others that it is done.
952 	IORequest* parent = fParent;
953 	io_request_finished_callback finishedCallback = fFinishedCallback;
954 	void* finishedCookie = fFinishedCookie;
955 	status_t status = fStatus;
956 	generic_size_t lastTransferredOffset
957 		= fRelativeParentOffset + fTransferSize;
958 	bool partialTransfer = status != B_OK || fPartialTransfer;
959 	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;
960 
961 	// unblock waiters
962 	fIsNotified = true;
963 	fFinishedCondition.NotifyAll();
964 
965 	locker.Unlock();
966 
967 	// notify callback
968 	if (finishedCallback != NULL) {
969 		finishedCallback(finishedCookie, this, status, partialTransfer,
970 			lastTransferredOffset);
971 	}
972 
973 	// notify parent
974 	if (parent != NULL) {
975 		parent->SubRequestFinished(this, status, partialTransfer,
976 			lastTransferredOffset);
977 	}
978 
979 	if (deleteRequest)
980 		delete this;
981 }
982 
983 
984 /*!	Returns whether this request or any of its ancestors has a finished or
985 	iteration callback. Used to decide whether NotifyFinished() can be called
986 	synchronously.
987 */
988 bool
989 IORequest::HasCallbacks() const
990 {
991 	if (fFinishedCallback != NULL || fIterationCallback != NULL)
992 		return true;
993 
994 	return fParent != NULL && fParent->HasCallbacks();
995 }
996 
997 
998 void
999 IORequest::SetStatusAndNotify(status_t status)
1000 {
1001 	MutexLocker locker(fLock);
1002 
1003 	if (fStatus != 1)
1004 		return;
1005 
1006 	fStatus = status;
1007 
1008 	locker.Unlock();
1009 
1010 	NotifyFinished();
1011 }
1012 
1013 
1014 void
1015 IORequest::OperationFinished(IOOperation* operation, status_t status,
1016 	bool partialTransfer, generic_size_t transferEndOffset)
1017 {
1018 	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
1019 		operation, status, this);
1020 
1021 	MutexLocker locker(fLock);
1022 
1023 	fChildren.Remove(operation);
1024 	operation->SetParent(NULL);
1025 
1026 	if (status != B_OK || partialTransfer) {
1027 		if (fTransferSize > transferEndOffset)
1028 			fTransferSize = transferEndOffset;
1029 		fPartialTransfer = true;
1030 	}
1031 
1032 	if (status != B_OK && fStatus == 1)
1033 		fStatus = status;
1034 
1035 	if (--fPendingChildren > 0)
1036 		return;
1037 
1038 	// last child finished
1039 
1040 	// set status, if not done yet
1041 	if (fStatus == 1)
1042 		fStatus = B_OK;
1043 }
1044 
1045 
1046 void
1047 IORequest::SubRequestFinished(IORequest* request, status_t status,
1048 	bool partialTransfer, generic_size_t transferEndOffset)
1049 {
1050 	TRACE("IORequest::SubRequestFinished(%p, %#" B_PRIx32 ", %d, %"
1051 		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer, transferEndOffset, this);
1052 
1053 	MutexLocker locker(fLock);
1054 
1055 	if (status != B_OK || partialTransfer) {
1056 		if (fTransferSize > transferEndOffset)
1057 			fTransferSize = transferEndOffset;
1058 		fPartialTransfer = true;
1059 	}
1060 
1061 	if (status != B_OK && fStatus == 1)
1062 		fStatus = status;
1063 
1064 	if (--fPendingChildren > 0 || fSuppressChildNotifications)
1065 		return;
1066 
1067 	// last child finished
1068 
1069 	// set status, if not done yet
1070 	if (fStatus == 1)
1071 		fStatus = B_OK;
1072 
1073 	locker.Unlock();
1074 
1075 	NotifyFinished();
1076 }
1077 
1078 
1079 void
1080 IORequest::SetUnfinished()
1081 {
1082 	MutexLocker _(fLock);
1083 	ResetStatus();
1084 }
1085 
1086 
1087 void
1088 IORequest::SetTransferredBytes(bool partialTransfer,
1089 	generic_size_t transferredBytes)
1090 {
1091 	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
1092 		partialTransfer, transferredBytes);
1093 
1094 	MutexLocker _(fLock);
1095 
1096 	fPartialTransfer = partialTransfer;
1097 	fTransferSize = transferredBytes;
1098 }
1099 
1100 
1101 void
1102 IORequest::SetSuppressChildNotifications(bool suppress)
1103 {
1104 	fSuppressChildNotifications = suppress;
1105 }
1106 
1107 
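/*!	Advances the request's internal vec iteration by \a bySize bytes:
	decreases the remaining byte count, increases the transferred size, and
	moves fVecIndex/fVecOffset forward across the buffer's vecs.
*/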
1108 void
1109 IORequest::Advance(generic_size_t bySize)
1110 {
1111 	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
1112 		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
1113 		fRemainingBytes - bySize);
1114 	fRemainingBytes -= bySize;
1115 	fTransferSize += bySize;
1116 
1117 	generic_io_vec* vecs = fBuffer->Vecs();
1118 	uint32 vecCount = fBuffer->VecCount();
1119 	while (fVecIndex < vecCount
1120 			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
1121 		bySize -= vecs[fVecIndex].length - fVecOffset;
1122 		fVecOffset = 0;
1123 		fVecIndex++;
1124 	}
1125 
1126 	fVecOffset += bySize;
1127 }
1128 
1129 
1130 IORequest*
1131 IORequest::FirstSubRequest()
1132 {
1133 	return dynamic_cast<IORequest*>(fChildren.Head());
1134 }
1135 
1136 
1137 IORequest*
1138 IORequest::NextSubRequest(IORequest* previous)
1139 {
1140 	if (previous == NULL)
1141 		return NULL;
1142 	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
1143 }
1144 
1145 
1146 void
1147 IORequest::AddOperation(IOOperation* operation)
1148 {
1149 	MutexLocker locker(fLock);
1150 	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
1151 	fChildren.Add(operation);
1152 	fPendingChildren++;
1153 }
1154 
1155 
1156 void
1157 IORequest::RemoveOperation(IOOperation* operation)
1158 {
1159 	MutexLocker locker(fLock);
1160 	fChildren.Remove(operation);
1161 	operation->SetParent(NULL);
1162 }
1163 
1164 
1165 status_t
1166 IORequest::CopyData(off_t offset, void* buffer, size_t size)
1167 {
1168 	return _CopyData(buffer, offset, size, true);
1169 }
1170 
1171 
1172 status_t
1173 IORequest::CopyData(const void* buffer, off_t offset, size_t size)
1174 {
1175 	return _CopyData((void*)buffer, offset, size, false);
1176 }
1177 
1178 
1179 status_t
1180 IORequest::ClearData(off_t offset, generic_size_t size)
1181 {
1182 	if (size == 0)
1183 		return B_OK;
1184 
1185 	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
1186 		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
1187 			", %" B_PRIuGENADDR ")", offset, size);
1188 		return B_BAD_VALUE;
1189 	}
1190 
1191 	// If we can, we directly clear the virtual buffer. The memory is locked
1192 	// in this case.
1193 	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
1194 	if (fBuffer->IsPhysical()) {
1195 		clearFunction = &IORequest::_ClearDataPhysical;
1196 	} else {
1197 		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
1198 			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
1199 	}
1200 
1201 	// skip bytes if requested
1202 	generic_io_vec* vecs = fBuffer->Vecs();
1203 	generic_size_t skipBytes = offset - fOffset;
1204 	generic_size_t vecOffset = 0;
1205 	while (skipBytes > 0) {
1206 		if (vecs[0].length > skipBytes) {
1207 			vecOffset = skipBytes;
1208 			break;
1209 		}
1210 
1211 		skipBytes -= vecs[0].length;
1212 		vecs++;
1213 	}
1214 
1215 	// clear vector-wise
1216 	while (size > 0) {
1217 		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
1218 		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
1219 			fTeam);
1220 		if (error != B_OK)
1221 			return error;
1222 
1223 		size -= toClear;
1224 		vecs++;
1225 		vecOffset = 0;
1226 	}
1227 
1228 	return B_OK;
1229 
1230 }
1231 
1232 
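/*!	Copies \a size bytes between \a _buffer and this request's I/O buffer,
	starting at the given request offset. Depending on the buffer the copy is
	done directly, via physical memory, or -- for user buffers of a foreign
	team -- via _CopyUser(). If \a copyIn is \c true, the given buffer is the
	destination, i.e. data is copied out of the request's buffer.
*/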
1233 status_t
1234 IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
1235 {
1236 	if (size == 0)
1237 		return B_OK;
1238 
1239 	uint8* buffer = (uint8*)_buffer;
1240 
1241 	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
1242 		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
1243 			offset, size);
1244 		return B_BAD_VALUE;
1245 	}
1246 
1247 	// If we can, we directly copy from/to the virtual buffer. The memory is
1248 	// locked in this case.
1249 	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
1250 	if (fBuffer->IsPhysical()) {
1251 		copyFunction = &IORequest::_CopyPhysical;
1252 	} else {
1253 		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
1254 			? &IORequest::_CopyUser : &IORequest::_CopySimple;
1255 	}
1256 
1257 	// skip bytes if requested
1258 	generic_io_vec* vecs = fBuffer->Vecs();
1259 	generic_size_t skipBytes = offset - fOffset;
1260 	generic_size_t vecOffset = 0;
1261 	while (skipBytes > 0) {
1262 		if (vecs[0].length > skipBytes) {
1263 			vecOffset = skipBytes;
1264 			break;
1265 		}
1266 
1267 		skipBytes -= vecs[0].length;
1268 		vecs++;
1269 	}
1270 
1271 	// copy vector-wise
1272 	while (size > 0) {
1273 		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
1274 		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
1275 			fTeam, copyIn);
1276 		if (error != B_OK)
1277 			return error;
1278 
1279 		buffer += toCopy;
1280 		size -= toCopy;
1281 		vecs++;
1282 		vecOffset = 0;
1283 	}
1284 
1285 	return B_OK;
1286 }
1287 
1288 
1289 /* static */ status_t
1290 IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
1291 	team_id team, bool copyIn)
1292 {
1293 	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
1294 		bounceBuffer, external, size, copyIn);
1295 	if (IS_USER_ADDRESS(external)) {
1296 		status_t status = B_OK;
1297 		if (copyIn)
1298 			status = user_memcpy(bounceBuffer, (void*)(addr_t)external, size);
1299 		else
1300 			status = user_memcpy((void*)(addr_t)external, bounceBuffer, size);
1301 		if (status < B_OK)
1302 			return status;
1303 		return B_OK;
1304 	}
1305 	if (copyIn)
1306 		memcpy(bounceBuffer, (void*)(addr_t)external, size);
1307 	else
1308 		memcpy((void*)(addr_t)external, bounceBuffer, size);
1309 	return B_OK;
1310 }
1311 
1312 
1313 /* static */ status_t
1314 IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
1315 	size_t size, team_id team, bool copyIn)
1316 {
1317 	if (copyIn)
1318 		return vm_memcpy_from_physical(bounceBuffer, external, size, false);
1319 
1320 	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
1321 }
1322 
1323 
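/*!	Copies between the bounce buffer and user memory of the given (possibly
	foreign) team: the user address is translated into physical ranges via
	get_memory_map_etc() and each range is copied through _CopyPhysical().
*/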
1324 /* static */ status_t
1325 IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
1326 	team_id team, bool copyIn)
1327 {
1328 	uint8* bounceBuffer = (uint8*)_bounceBuffer;
1329 	uint8* external = (uint8*)(addr_t)_external;
1330 
1331 	while (size > 0) {
1332 		static const int32 kEntryCount = 8;
1333 		physical_entry entries[kEntryCount];
1334 
1335 		uint32 count = kEntryCount;
1336 		status_t error = get_memory_map_etc(team, external, size, entries,
1337 			&count);
1338 		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
1339 			panic("IORequest::_CopyUser(): Failed to get physical memory for "
1340 				"user memory %p\n", external);
1341 			return B_BAD_ADDRESS;
1342 		}
1343 
1344 		for (uint32 i = 0; i < count; i++) {
1345 			const physical_entry& entry = entries[i];
1346 			error = _CopyPhysical(bounceBuffer, entry.address, entry.size, team,
1347 				copyIn);
1348 			if (error != B_OK)
1349 				return error;
1350 
1351 			size -= entry.size;
1352 			bounceBuffer += entry.size;
1353 			external += entry.size;
1354 		}
1355 	}
1356 
1357 	return B_OK;
1358 }
1359 
1360 
1361 /*static*/ status_t
1362 IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
1363 	team_id team)
1364 {
1365 	memset((void*)(addr_t)external, 0, (size_t)size);
1366 	return B_OK;
1367 }
1368 
1369 
1370 /*static*/ status_t
1371 IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
1372 	team_id team)
1373 {
1374 	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
1375 }
1376 
1377 
1378 /*static*/ status_t
1379 IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
1380 	team_id team)
1381 {
1382 	uint8* external = (uint8*)(addr_t)_external;
1383 
1384 	while (size > 0) {
1385 		static const int32 kEntryCount = 8;
1386 		physical_entry entries[kEntryCount];
1387 
1388 		uint32 count = kEntryCount;
1389 		status_t error = get_memory_map_etc(team, external, size, entries,
1390 			&count);
1391 		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
1392 			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
1393 				"for user memory %p\n", external);
1394 			return B_BAD_ADDRESS;
1395 		}
1396 
1397 		for (uint32 i = 0; i < count; i++) {
1398 			const physical_entry& entry = entries[i];
1399 			error = _ClearDataPhysical(entry.address, entry.size, team);
1400 			if (error != B_OK)
1401 				return error;
1402 
1403 			size -= entry.size;
1404 			external += entry.size;
1405 		}
1406 	}
1407 
1408 	return B_OK;
1409 }
1410 
1411 
1412 void
1413 IORequest::Dump() const
1414 {
1415 	kprintf("io_request at %p\n", this);
1416 
1417 	kprintf("  owner:             %p\n", fOwner);
1418 	kprintf("  parent:            %p\n", fParent);
1419 	kprintf("  status:            %s\n", strerror(fStatus));
1420 	kprintf("  mutex:             %p\n", &fLock);
1421 	kprintf("  IOBuffer:          %p\n", fBuffer);
1422 	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
1423 	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
1424 	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
1425 	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
1426 	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
1427 	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
1428 	kprintf("  team:              %" B_PRId32 "\n", fTeam);
1429 	kprintf("  thread:            %" B_PRId32 "\n", fThread);
1430 	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
1431 	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
1432 	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
1433 	kprintf("  iteration:\n");
1434 	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
1435 	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
1436 	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
1437 	kprintf("  callbacks:\n");
1438 	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
1439 	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
1440 		fIterationCookie);
1441 	kprintf("  children:\n");
1442 
1443 	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
1444 	while (iterator.HasNext()) {
1445 		kprintf("    %p\n", iterator.Next());
1446 	}
1447 
1448 	set_debug_variable("_parent", (addr_t)fParent);
1449 	set_debug_variable("_mutex", (addr_t)&fLock);
1450 	set_debug_variable("_buffer", (addr_t)fBuffer);
1451 	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
1452 }
1453