/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2017, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <debug.h>
#include <heap.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"


//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};


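// Cookie for IOBuffer::GetNextVirtualVec(): it tracks the current vec and
// the offset within it, plus any temporary physical page mapping that has to
// be put back before the next page can be mapped.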
struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;

	virtual_vec_cookie()
		:
		vec_index(0),
		vec_offset(0),
		mapped_area(-1),
		physical_page_handle(NULL),
		virtual_address((addr_t)-1)
	{
	}

	void PutPhysicalPageIfNeeded()
	{
		if (virtual_address != (addr_t)-1) {
			vm_put_physical_page(virtual_address, physical_page_handle);
			virtual_address = (addr_t)-1;
		}
	}
};


// #pragma mark -


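// Note: Throughout this file a status of 1 serves as the "in progress"
// sentinel; a chunk counts as finished once fStatus holds B_OK or an error
// code.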
IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


//	#pragma mark -


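/*!	Allocates an IOBuffer with capacity for \a count vecs. Since the structure
	already embeds one generic_io_vec, only \c count - 1 additional elements
	need to be appended to the allocation. VIP buffers are allocated with
	HEAP_PRIORITY_VIP, so that they remain available under memory pressure.
*/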
IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}


void
IOBuffer::Delete()
{
	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}


void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
	uint32 count, generic_size_t length, uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}


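/*!	Returns the buffer's next vec as a virtual iovec in \a vector. Pass a
	NULL \a _cookie on the first call; the same cookie must be passed to all
	subsequent calls and finally to FreeVirtualVecCookie(). For physical
	buffers the method first tries to map all vecs into a single kernel area
	and otherwise falls back to mapping one page at a time. Returns
	\c B_BAD_INDEX once the buffer is exhausted.
*/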
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		_cookie = cookie;
	}

	// put back any page mapped in a previous iteration
	cookie->PutPhysicalPageIfNeeded();

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

// TODO: This is a potential violation of the VIP requirement, since
// vm_map_physical_memory_vecs() allocates memory without special flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else {
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
		}
	}

	// fall back to page-wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

// TODO: This is a potential violation of the VIP requirement, since
// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}


void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);

	cookie->PutPhysicalPageIfNeeded();

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}


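/*!	Locks the memory of all vecs via lock_memory_etc(). If locking fails for
	one vec, the vecs locked so far are unlocked again and the error is
	returned.
*/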
status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:     %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:       %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:     %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity:   %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:       %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


// #pragma mark -


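/*!	Invoked when the operation's I/O has completed. Returns \c true if the
	operation is really finished, or \c false if it has merely advanced to the
	next phase of a partial-block read-modify-write and needs to be executed
	again.
*/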
bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");
	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair the phase-adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair the phase-adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			SetStatus(error);
	}

	return true;
}


/*!	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy data to the bounce buffer segments, save for the partial
		// begin/end blocks, which can only be copied after their respective
		// read phase has completed.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}


off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}


generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}


uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}


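/*!	In the partial read phases only a single block is transferred: trims the
	vec containing the partial begin (or end) block down to \c fBlockSize,
	remembering the vec's index and original length so that Finish() can
	repair it afterwards.
*/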
void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	}
}


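/*!	Copies the caller's data that falls into the partially covered first
	block between the request buffer and the bounce buffer. \a singleBlockOnly
	is set when the original range already ends within this block, i.e. no
	separate read-end phase is required.
*/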
status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
	} else {
		return fParent->CopyData(
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
			OriginalOffset(), length);
	}
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
		- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer contiguously
		// (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}


void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:           %p\n", fParent);
	kprintf("  status:           %s\n", strerror(fStatus));
	kprintf("  dma buffer:       %p\n", fDMABuffer);
	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
		fOffset, fOriginalOffset);
	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:  %u\n", fSavedVecIndex);
	kprintf("  saved vec length: %u\n", fSavedVecLength);
	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


// #pragma mark -


IORequest::IORequest()
	:
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	if (fBuffer != NULL)
		fBuffer->Delete();
	mutex_destroy(&fLock);
}


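/*!	Allocates a new IORequest from the appropriate heap. A minimal usage
	sketch (hypothetical kernel buffer and hand-off, error handling elided):

		IORequest* request = IORequest::Create(false);
		if (request != NULL && request->Init(0, (generic_addr_t)(addr_t)buffer,
				B_PAGE_SIZE, false, 0) == B_OK) {
			// ... hand the request to an I/O scheduler or driver ...
			status_t status = request->Wait(0, 0);
		}
*/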
/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}


status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	ASSERT(offset >= 0);

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}


status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
	uint32 flags)
{
	ASSERT(offset >= 0);

	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}


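/*!	Creates a child request covering \a length bytes starting at
	\a parentOffset within this request's buffer; \a offset is the device
	offset the subrequest targets. The subrequest receives copies of the
	relevant vec descriptors (i.e. it describes the same memory) and is
	registered as a pending child.
*/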
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}


void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}


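/*!	Blocks the caller until the request is finished and notified. Returns the
	request's final status, or the error from waiting (e.g. \c B_TIMED_OUT
	when \a flags contains a timeout flag).
*/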
status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}


void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// The iteration failed, so we are responsible for notifying any
			// waiters that the request is finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}


/*!	Returns whether this request or any of its ancestors has a finished or
	iteration callback. Used to decide whether NotifyFinished() can be called
	synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, status, this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubrequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}


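/*!	Advances the built-in iteration state by \a bySize bytes: accounts the
	transferred bytes and moves fVecIndex/fVecOffset past all completely
	consumed vecs.
*/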
1098 IORequest::Advance(generic_size_t bySize)
1099 {
1100 	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
1101 		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
1102 		fRemainingBytes - bySize);
1103 	fRemainingBytes -= bySize;
1104 	fTransferSize += bySize;
1105 
1106 	generic_io_vec* vecs = fBuffer->Vecs();
1107 	uint32 vecCount = fBuffer->VecCount();
1108 	while (fVecIndex < vecCount
1109 			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
1110 		bySize -= vecs[fVecIndex].length - fVecOffset;
1111 		fVecOffset = 0;
1112 		fVecIndex++;
1113 	}
1114 
1115 	fVecOffset += bySize;
1116 }
1117 
1118 
1119 IORequest*
1120 IORequest::FirstSubRequest()
1121 {
1122 	return dynamic_cast<IORequest*>(fChildren.Head());
1123 }
1124 
1125 
1126 IORequest*
1127 IORequest::NextSubRequest(IORequest* previous)
1128 {
1129 	if (previous == NULL)
1130 		return NULL;
1131 	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
1132 }
1133 
1134 
1135 void
1136 IORequest::AddOperation(IOOperation* operation)
1137 {
1138 	MutexLocker locker(fLock);
1139 	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
1140 	fChildren.Add(operation);
1141 	fPendingChildren++;
1142 }
1143 
1144 
1145 void
1146 IORequest::RemoveOperation(IOOperation* operation)
1147 {
1148 	MutexLocker locker(fLock);
1149 	fChildren.Remove(operation);
1150 	operation->SetParent(NULL);
1151 }
1152 
1153 
1154 status_t
1155 IORequest::CopyData(off_t offset, void* buffer, size_t size)
1156 {
1157 	return _CopyData(buffer, offset, size, true);
1158 }
1159 
1160 
1161 status_t
1162 IORequest::CopyData(const void* buffer, off_t offset, size_t size)
1163 {
1164 	return _CopyData((void*)buffer, offset, size, false);
1165 }
1166 
1167 
1168 status_t
1169 IORequest::ClearData(off_t offset, generic_size_t size)
1170 {
1171 	if (size == 0)
1172 		return B_OK;
1173 
1174 	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
1175 		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
1176 			", %" B_PRIuGENADDR ")", offset, size);
1177 		return B_BAD_VALUE;
1178 	}
1179 
1180 	// If we can, we directly copy from/to the virtual buffer. The memory is
1181 	// locked in this case.
1182 	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
1183 	if (fBuffer->IsPhysical()) {
1184 		clearFunction = &IORequest::_ClearDataPhysical;
1185 	} else {
1186 		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
1187 			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
1188 	}
1189 
1190 	// skip bytes if requested
1191 	generic_io_vec* vecs = fBuffer->Vecs();
1192 	generic_size_t skipBytes = offset - fOffset;
1193 	generic_size_t vecOffset = 0;
1194 	while (skipBytes > 0) {
1195 		if (vecs[0].length > skipBytes) {
1196 			vecOffset = skipBytes;
1197 			break;
1198 		}
1199 
1200 		skipBytes -= vecs[0].length;
1201 		vecs++;
1202 	}
1203 
1204 	// clear vector-wise
1205 	while (size > 0) {
1206 		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
1207 		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
1208 			fTeam);
1209 		if (error != B_OK)
1210 			return error;
1211 
1212 		size -= toClear;
1213 		vecs++;
1214 		vecOffset = 0;
1215 	}
1216 
1217 	return B_OK;
1218 
1219 }
1220 
1221 
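/*!	Common back end for the public CopyData() variants: validates the range,
	picks a copy strategy (physical, other-team user, or plain memcpy), skips
	to the vec containing \a offset, and then copies vector-wise.
*/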
status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	if (size == 0)
		return B_OK;

	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
			offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
	team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
		bounceBuffer, external, size, copyIn);
	if (IS_USER_ADDRESS(external)) {
		status_t status = B_OK;
		if (copyIn)
			status = user_memcpy(bounceBuffer, (void*)(addr_t)external, size);
		else
			status = user_memcpy((void*)(addr_t)external, bounceBuffer, size);
		if (status < B_OK)
			return status;
		return B_OK;
	}
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


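/*!	Copies between a kernel bounce buffer and the user memory of a
	(potentially) different team: the user range is translated into physical
	ranges via get_memory_map_etc(), and each range is copied with
	_CopyPhysical().
*/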
1314 IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
1315 	team_id team, bool copyIn)
1316 {
1317 	uint8* bounceBuffer = (uint8*)_bounceBuffer;
1318 	uint8* external = (uint8*)(addr_t)_external;
1319 
1320 	while (size > 0) {
1321 		static const int32 kEntryCount = 8;
1322 		physical_entry entries[kEntryCount];
1323 
1324 		uint32 count = kEntryCount;
1325 		status_t error = get_memory_map_etc(team, external, size, entries,
1326 			&count);
1327 		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
1328 			panic("IORequest::_CopyUser(): Failed to get physical memory for "
1329 				"user memory %p\n", external);
1330 			return B_BAD_ADDRESS;
1331 		}
1332 
1333 		for (uint32 i = 0; i < count; i++) {
1334 			const physical_entry& entry = entries[i];
1335 			error = _CopyPhysical(bounceBuffer, entry.address, entry.size, team,
1336 				copyIn);
1337 			if (error != B_OK)
1338 				return error;
1339 
1340 			size -= entry.size;
1341 			bounceBuffer += entry.size;
1342 			external += entry.size;
1343 		}
1344 	}
1345 
1346 	return B_OK;
1347 }
1348 
1349 
1350 /*static*/ status_t
1351 IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
1352 	team_id team)
1353 {
1354 	memset((void*)(addr_t)external, 0, (size_t)size);
1355 	return B_OK;
1356 }
1357 
1358 
1359 /*static*/ status_t
1360 IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
1361 	team_id team)
1362 {
1363 	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
1364 }
1365 
1366 
1367 /*static*/ status_t
1368 IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
1369 	team_id team)
1370 {
1371 	uint8* external = (uint8*)(addr_t)_external;
1372 
1373 	while (size > 0) {
1374 		static const int32 kEntryCount = 8;
1375 		physical_entry entries[kEntryCount];
1376 
1377 		uint32 count = kEntryCount;
1378 		status_t error = get_memory_map_etc(team, external, size, entries,
1379 			&count);
1380 		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
1381 			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
1382 				"for user memory %p\n", external);
1383 			return B_BAD_ADDRESS;
1384 		}
1385 
1386 		for (uint32 i = 0; i < count; i++) {
1387 			const physical_entry& entry = entries[i];
1388 			error = _ClearDataPhysical(entry.address, entry.size, team);
1389 			if (error != B_OK)
1390 				return error;
1391 
1392 			size -= entry.size;
1393 			external += entry.size;
1394 		}
1395 	}
1396 
1397 	return B_OK;
1398 }
1399 
1400 
1401 void
1402 IORequest::Dump() const
1403 {
1404 	kprintf("io_request at %p\n", this);
1405 
1406 	kprintf("  owner:             %p\n", fOwner);
1407 	kprintf("  parent:            %p\n", fParent);
1408 	kprintf("  status:            %s\n", strerror(fStatus));
1409 	kprintf("  mutex:             %p\n", &fLock);
1410 	kprintf("  IOBuffer:          %p\n", fBuffer);
1411 	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
1412 	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
1413 	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
1414 	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
1415 	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
1416 	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
1417 	kprintf("  team:              %" B_PRId32 "\n", fTeam);
1418 	kprintf("  thread:            %" B_PRId32 "\n", fThread);
1419 	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
1420 	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
1421 	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
1422 	kprintf("  iteration:\n");
1423 	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
1424 	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
1425 	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
1426 	kprintf("  callbacks:\n");
1427 	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
1428 	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
1429 		fIterationCookie);
1430 	kprintf("  children:\n");
1431 
1432 	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
1433 	while (iterator.HasNext()) {
1434 		kprintf("    %p\n", iterator.Next());
1435 	}
1436 
1437 	set_debug_variable("_parent", (addr_t)fParent);
1438 	set_debug_variable("_mutex", (addr_t)&fLock);
1439 	set_debug_variable("_buffer", (addr_t)fBuffer);
1440 	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
1441 }
1442