/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2017, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <debug.h>
#include <heap.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"


//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};
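
// An unaligned write is executed as a read-modify-write: the partial first
// and/or last block is first read into the bounce buffer (PHASE_READ_BEGIN,
// PHASE_READ_END), the caller's data is merged in, and only then is the
// whole range written out (PHASE_DO_ALL). See IOOperation::Prepare() and
// IOOperation::Finish().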


struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;

	virtual_vec_cookie()
		:
		vec_index(0),
		vec_offset(0),
		mapped_area(-1),
		physical_page_handle(NULL),
		virtual_address((addr_t)-1)
	{
	}

	void PutPhysicalPageIfNeeded()
	{
		if (virtual_address != (addr_t)-1) {
			vm_put_physical_page(virtual_address, physical_page_handle);
			virtual_address = (addr_t)-1;
		}
	}
};


// #pragma mark -


IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


// #pragma mark -


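/*!	Allocates an IOBuffer with room for \a count vectors. The vector array
	lives inline in the same allocation, which is why the size computation
	below only adds \a count - 1 vectors to sizeof(IOBuffer) -- the structure
	itself already embeds the first one. With \a vip set, the buffer is
	allocated from the VIP heap, so that requests needed to free memory can
	still be serviced under memory pressure.
*/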
IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}


void
IOBuffer::Delete()
{
	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}


void
IOBuffer::SetVecs(generic_size_t firstVecOffset, generic_size_t lastVecSize,
	const generic_io_vec* vecs, uint32 count, generic_size_t length,
	uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}
	if (lastVecSize > 0)
		fVecs[count - 1].length = lastVecSize;

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);

#if KDEBUG
	generic_size_t actualLength = 0;
	for (size_t i = 0; i < fVecCount; i++)
		actualLength += fVecs[i].length;

	ASSERT(actualLength == fLength);
#endif
}


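/*!	Returns the buffer's vectors one at a time, mapped into kernel address
	space. Virtual vectors are returned as-is; for physical buffers the
	method first tries to map all vectors into a single area and falls back
	to mapping one page per call. Pass the same \a _cookie (initialized to
	NULL) on every call, and release it with FreeVirtualVecCookie() once
	B_BAD_INDEX signals the end of the buffer. A typical loop (a sketch;
	consume() stands in for whatever the caller does with the data):

		void* cookie = NULL;
		iovec vec;
		while (buffer->GetNextVirtualVec(cookie, vec) == B_OK)
			consume(vec.iov_base, vec.iov_len);
		if (cookie != NULL)
			buffer->FreeVirtualVecCookie(cookie);
*/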
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		_cookie = cookie;
	}

	// release a previously mapped page, if any
	cookie->PutPhysicalPageIfNeeded();

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;
		ASSERT(cookie->mapped_area == -1);

		// TODO: This is a potential violation of the VIP requirement, since
		// vm_map_physical_memory_vecs() allocates memory without special
		// flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		}

		ktrace_printf("failed to map area: %s\n",
			strerror(cookie->mapped_area));
	}

	// fall back to page-wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

	// TODO: This is a potential violation of the VIP requirement, since
	// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}


void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;

	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);
	cookie->PutPhysicalPageIfNeeded();

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}


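/*!	Locks (wires) the memory of all vectors. If locking one of the vectors
	fails, everything locked so far is unlocked again before the error is
	returned. Note that \a isWrite refers to the direction of the request,
	not of the lock: for a read request the pages are locked with
	B_READ_DEVICE, i.e. so that the device can write into them.
*/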
status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:   %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:     %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:   %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity: %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:     %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR
			"\n", i, fVecs[i].base, fVecs[i].length);
	}
}


// #pragma mark -


void
IOOperation::SetStatus(status_t status, generic_size_t completedLength)
{
	IORequestChunk::SetStatus(status);
	if (IsWrite() == fParent->IsWrite()) {
		// Determine how many bytes we actually read or wrote, relative to
		// the original range, not the translated range.
		const generic_size_t partialBegin = (fOriginalOffset - fOffset);
		generic_size_t originalTransferredBytes = completedLength;
		if (originalTransferredBytes < partialBegin)
			originalTransferredBytes = 0;
		else
			originalTransferredBytes -= partialBegin;

		if (originalTransferredBytes > fOriginalLength)
			originalTransferredBytes = fOriginalLength;

		fTransferredBytes += originalTransferredBytes;
	}
}


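/*!	Called when the I/O scheduler has executed this operation. Returns true
	if the operation is complete, false if another phase of a partial write
	still needs to be executed: in that case the bounce buffer has been
	updated, the status has been reset, and the operation is expected to be
	resubmitted.
*/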
bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");

	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair phase adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				IORequestChunk::SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair phase adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				IORequestChunk::SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			IORequestChunk::SetStatus(error);
	}

	return true;
}


/*!	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy data to bounce buffer segments, save the partial begin/end
		// vec, which will be copied after their respective read phase.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}


off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}


generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}


uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}


void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	}
}


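/*!	Copies the caller-visible part of the first (partial) block between the
	request buffer and the bounce buffer. \a singleBlockOnly is set when the
	original range already ends within this first block, in which case the
	read-end phase can be skipped entirely.
*/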
status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
			length);
	}

	return fParent->CopyData(
		(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
		OriginalOffset(), length);
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
			- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer
		// contiguously (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}


void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  dma buffer:        %p\n", fDMABuffer);
	kprintf("  offset:            %-8" B_PRIdOFF " (original: %" B_PRIdOFF
		")\n", fOffset, fOriginalOffset);
	kprintf("  length:            %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:       %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:        %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:   %u\n", fSavedVecIndex);
	kprintf("  saved vec length:  %u\n", fSavedVecLength);
	kprintf("  r/w:               %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:             %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:     %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:       %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:     %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


// #pragma mark -

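
// A typical consumer creates and initializes a request, hands it to the
// driver or I/O scheduler, and then either blocks in Wait() or registers a
// finished callback. A minimal sketch (do_device_io() is an illustrative
// placeholder, not part of this API):
//
//	IORequest* request = IORequest::Create(false);
//	if (request == NULL)
//		return B_NO_MEMORY;
//	status_t status = request->Init(offset, (generic_addr_t)buffer, length,
//		false, 0);
//	if (status == B_OK) {
//		do_device_io(deviceCookie, request);
//		status = request->Wait(0, 0);
//	}
//	delete request;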

IORequest::IORequest()
	:
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	if (fBuffer != NULL)
		fBuffer->Delete();
	mutex_destroy(&fLock);
}


/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}


status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	ASSERT(offset >= 0);

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}


status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	generic_size_t lastVecSize, const generic_io_vec* vecs, size_t count,
	generic_size_t length, bool write, uint32 flags)
{
	ASSERT(offset >= 0);

	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, lastVecSize, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}


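/*!	Creates a sub-request covering \a length bytes at offset \a parentOffset
	within this request's buffer, to be executed at device offset \a offset.
	The matching slice of the parent's vector array is shared with the child;
	the child is added to the children list and counted as pending until
	SubRequestFinished() is called for it.
*/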
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, remainingLength,
		vecs + startVec, endVec - startVec + 1, length, fIsWrite,
		fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}


void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}


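/*!	Blocks the calling thread until the request has completed and been
	notified. \a flags and \a timeout are passed on to
	ConditionVariableEntry::Wait(), so e.g. B_RELATIVE_TIMEOUT can be used to
	bound the wait. Returns the request's status, or the wait error (e.g.
	B_TIMED_OUT) if waiting itself failed.
*/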
status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}


void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);
	ASSERT(fStatus != 1);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration
		// callback, call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// Iteration failed, which means we're responsible for notifying
			// that the request is finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);
	ASSERT(fTransferSize <= fLength);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t transferredBytes = fTransferSize;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + transferredBytes;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			transferredBytes);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}


/*!	Returns whether this request or any of its ancestors has a finished or
	notification callback. Used to decide whether NotifyFinished() can be
	called synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, operation->Status(), this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	const status_t status = operation->Status();
	const bool partialTransfer
		= operation->TransferredBytes() < operation->OriginalLength();
	const generic_size_t transferEndOffset
		= (operation->OriginalOffset() - Offset())
			+ operation->TransferredBytes();

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubRequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}


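/*!	Advances the request's iteration state -- vector index, vector offset,
	and remaining byte count -- by \a bySize bytes, accounting them as
	transferred. Users processing a request in pieces use this together with
	RemainingBytes() to walk the buffer.
*/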
void
IORequest::Advance(generic_size_t bySize)
{
	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
		fRemainingBytes - bySize);
	fRemainingBytes -= bySize;
	fTransferSize += bySize;

	generic_io_vec* vecs = fBuffer->Vecs();
	uint32 vecCount = fBuffer->VecCount();
	while (fVecIndex < vecCount
		&& vecs[fVecIndex].length - fVecOffset <= bySize) {
		bySize -= vecs[fVecIndex].length - fVecOffset;
		fVecOffset = 0;
		fVecIndex++;
	}

	fVecOffset += bySize;
}


IORequest*
IORequest::FirstSubRequest()
{
	return dynamic_cast<IORequest*>(fChildren.Head());
}


IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
	if (previous == NULL)
		return NULL;
	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}


void
IORequest::AddOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
	fChildren.Add(operation);
	fPendingChildren++;
}


void
IORequest::RemoveOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	fChildren.Remove(operation);
	operation->SetParent(NULL);
}


status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
	return _CopyData(buffer, offset, size, true);
}


status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
	return _CopyData((void*)buffer, offset, size, false);
}


status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
	if (size == 0)
		return B_OK;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
			", %" B_PRIuGENADDR ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly clear the virtual buffer. The memory is locked
	// in this case.
	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
	if (fBuffer->IsPhysical()) {
		clearFunction = &IORequest::_ClearDataPhysical;
	} else {
		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// clear vector-wise
	while (size > 0) {
		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
			fTeam);
		if (error != B_OK)
			return error;

		size -= toClear;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	if (size == 0)
		return B_OK;

	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
			offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


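/*!	The _Copy*() helpers below share one signature so that _CopyData() can
	pick the right one by buffer type: _CopySimple() for memory that is
	directly addressable from the current team (kernel or locked user
	memory), _CopyPhysical() for physical addresses, and _CopyUser() for
	another team's user memory, which is first translated page-wise to
	physical ranges via get_memory_map_etc().
*/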
/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
		bounceBuffer, external, size, copyIn);
	if (IS_USER_ADDRESS(external)) {
		status_t status = B_OK;
		if (copyIn)
			status = user_memcpy(bounceBuffer, (void*)(addr_t)external, size);
		else
			status = user_memcpy((void*)(addr_t)external, bounceBuffer, size);
		if (status < B_OK)
			return status;
		return B_OK;
	}
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external,
	size_t size, team_id team, bool copyIn)
{
	uint8* bounceBuffer = (uint8*)_bounceBuffer;
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_CopyUser(): Failed to get physical memory for "
				"user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _CopyPhysical(bounceBuffer, entry.address, entry.size,
				team, copyIn);
			if (error != B_OK)
				return error;

			size -= entry.size;
			bounceBuffer += entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
	team_id team)
{
	memset((void*)(addr_t)external, 0, (size_t)size);
	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
	team_id team)
{
	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}


/*static*/ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
	team_id team)
{
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
				"for user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _ClearDataPhysical(entry.address, entry.size, team);
			if (error != B_OK)
				return error;

			size -= entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


void
IORequest::Dump() const
{
	kprintf("io_request at %p\n", this);

	kprintf("  owner:             %p\n", fOwner);
	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  mutex:             %p\n", &fLock);
	kprintf("  IOBuffer:          %p\n", fBuffer);
	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
	kprintf("  relative offset:   %" B_PRIuGENADDR "\n",
		fRelativeParentOffset);
	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
	kprintf("  team:              %" B_PRId32 "\n", fTeam);
	kprintf("  thread:            %" B_PRId32 "\n", fThread);
	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
	kprintf("  iteration:\n");
	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
	kprintf("  callbacks:\n");
	kprintf("    finished %p, cookie %p\n", fFinishedCallback,
		fFinishedCookie);
	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
		fIterationCookie);
	kprintf("  children:\n");

	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
	while (iterator.HasNext()) {
		kprintf("    %p\n", iterator.Next());
	}

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_mutex", (addr_t)&fLock);
	set_debug_variable("_buffer", (addr_t)fBuffer);
	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}