/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2017, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <debug.h>
#include <heap.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"


//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};


struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;

	virtual_vec_cookie()
		:
		vec_index(0),
		vec_offset(0),
		mapped_area(-1),
		physical_page_handle(NULL),
		virtual_address((addr_t)-1)
	{
	}

	void PutPhysicalPageIfNeeded()
	{
		if (virtual_address != (addr_t)-1) {
			vm_put_physical_page(virtual_address, physical_page_handle);
			virtual_address = (addr_t)-1;
		}
	}
};


// #pragma mark -


IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


//	#pragma mark -


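/*!	Allocates an IOBuffer large enough to hold \a count vecs. The vec array
	lives inline in the same heap block as the buffer itself, which is why
	the size is computed with \c count - 1: the structure already declares
	one vec. If \a vip is \c true, the allocation is made from the VIP heap,
	so that I/O needed to make progress under memory pressure can still
	create buffers. Returns \c NULL when the allocation fails.
*/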
IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}


void
IOBuffer::Delete()
{
	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}


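/*!	Adopts \a count vecs describing the memory covered by this buffer. The
	first vec is adjusted by \a firstVecOffset, which therefore must be
	smaller than the first vec's length. Whether the vecs are physical is
	derived from \a flags (\c B_PHYSICAL_IO_REQUEST); whether they point
	into userland is derived from the first vec's address.
*/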
void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
	uint32 count, generic_size_t length, uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}


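/*!	Returns the next part of the buffer as a virtual vector in \a vector.
	On the first call \a _cookie must be \c NULL; the method allocates the
	iteration cookie, which must be released with FreeVirtualVecCookie()
	afterwards. For physical buffers the vecs are either mapped as a whole
	into a temporary area or, failing that, page by page. Returns
	\c B_BAD_INDEX when the iteration is done.

	Typical use looks roughly like this (sketch, error handling elided):
	\code
	void* cookie = NULL;
	iovec vector;
	while (buffer->GetNextVirtualVec(cookie, vector) == B_OK) {
		// process vector.iov_base / vector.iov_len
	}
	if (cookie != NULL)
		buffer->FreeVirtualVecCookie(cookie);
	\endcode
*/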
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		_cookie = cookie;
	}

	// unmap any page mapped by the previous iteration
	cookie->PutPhysicalPageIfNeeded();

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

// TODO: This is a potential violation of the VIP requirement, since
// vm_map_physical_memory_vecs() allocates memory without special flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else {
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
		}
	}

	// fall back to page-wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

// TODO: This is a potential violation of the VIP requirement, since
// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}


void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);

	cookie->PutPhysicalPageIfNeeded();

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}


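/*!	Locks the buffer's memory into RAM, so it can be used for I/O without
	incurring page faults. For read requests \c B_READ_DEVICE is passed to
	lock_memory_etc(), announcing that the pages will be written to. On
	failure everything locked so far is unlocked again. Must be balanced
	by a call to UnlockMemory().
*/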
status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:     %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:       %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:     %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity:   %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:       %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


// #pragma mark -


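/*!	Called when the I/O scheduler has executed (a phase of) this operation.
	Returns \c true when the operation is complete, \c false when another
	phase needs to be executed: a partial write runs as up to three phases,
	first reading the partially covered begin and/or end blocks
	(\c PHASE_READ_BEGIN, \c PHASE_READ_END), merging in the data to be
	written, and finally transferring all blocks (\c PHASE_DO_ALL). For
	reads that used the bounce buffer, the relevant segments are copied
	back to their final location before returning.
*/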
bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");
	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair phase adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair phase adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			SetStatus(error);
	}

	return true;
}


/*!	Prepares the operation for execution: for writes, copies the data to be
	written into the bounce buffer segments and selects the initial phase.
	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy data to the bounce buffer segments, skipping the partial
		// begin/end blocks, which will be copied after their respective read
		// phases.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}


off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}


generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}


uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}


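/*!	Trims the DMA buffer's vec array to the single block that the upcoming
	read phase transfers: for \c PHASE_READ_BEGIN the vec containing the
	end of the first block is shortened, for \c PHASE_READ_END the vec
	containing the start of the last block is moved and shortened. The
	original index and length are saved in fSavedVecIndex/fSavedVecLength,
	so Finish() can repair the vec once the phase is done.
*/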
void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	}
}


status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
	} else {
		return fParent->CopyData(
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
			OriginalOffset(), length);
	}
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
		- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer contiguously
		// (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}


void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:           %p\n", fParent);
	kprintf("  status:           %s\n", strerror(fStatus));
	kprintf("  dma buffer:       %p\n", fDMABuffer);
	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
		fOffset, fOriginalOffset);
	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:  %" B_PRIu32 "\n", fSavedVecIndex);
	kprintf("  saved vec length: %" B_PRIu32 "\n", fSavedVecLength);
	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


// #pragma mark -


IORequest::IORequest()
	:
	fBuffer(NULL),
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	if (fBuffer != NULL)
		fBuffer->Delete();
	mutex_destroy(&fLock);
}


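/*!	Creates a new IORequest. When \a vip is \c true the object is allocated
	from the VIP heap, so that requests needed to free memory (e.g. page
	writebacks) can still be created under memory pressure. Returns \c NULL
	on allocation failure.
*/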
/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}


status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	ASSERT(offset >= 0);

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}


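/*!	Initializes the request with the given scatter/gather list. \a vecs must
	contain \a count vecs covering at least \a firstVecOffset + \a length
	bytes; they are copied into the request's own IOBuffer, with the first
	vec adjusted by \a firstVecOffset. \a flags accepts
	\c B_PHYSICAL_IO_REQUEST, \c B_VIP_IO_REQUEST, and
	\c B_DELETE_IO_REQUEST. The calling thread's team and thread IDs are
	recorded for later access to user memory.
*/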
status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
	uint32 flags)
{
	ASSERT(offset >= 0);

	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}


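/*!	Creates a sub request covering the range [\a parentOffset,
	\a parentOffset + \a length) of this request's buffer, to be transferred
	from/to device offset \a offset. The sub request references the
	relevant slice of this request's vecs and inherits team, thread, and
	flags (save \c B_DELETE_IO_REQUEST). It is added to this request's
	children and counts as a pending child until SubRequestFinished() has
	been called for it.
*/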
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}


void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}


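/*!	Blocks until the request is finished and the notification has been
	delivered, or until the given timeout expires. \a flags and \a timeout
	are passed on to ConditionVariableEntry::Wait(). Returns the request
	status respectively the wait error.

	E.g. to wait at most one second for a submitted request (sketch):
	\code
	status_t status = request->Wait(B_RELATIVE_TIMEOUT, 1000000);
	if (status == B_TIMED_OUT) {
		// the request has not finished yet
	}
	\endcode
*/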
status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}


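/*!	Delivers the finished notification: invokes the iteration callback
	first, if the request has one and isn't actually complete yet, then
	unlocks the buffer memory, wakes all Wait() callers, calls the finished
	callback, notifies the parent request, and finally deletes the request
	when \c B_DELETE_IO_REQUEST is set. Since any of the notified parties
	may delete the request, no member is touched once the waiters have been
	unblocked.
*/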
void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// The iteration callback failed or reported a partial transfer,
			// so we are responsible for notifying that the request has
			// finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}


/*!	Returns whether this request or any of its ancestors has a finished or
	iteration callback. Used to decide whether NotifyFinished() can be called
	synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, status, this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubRequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}


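/*!	Advances the request's iteration state (vec index/offset, remaining and
	transferred byte counts) by \a bySize bytes. E.g. with vecs of lengths
	512 and 4096, advancing a freshly initialized request by 600 bytes
	leaves the iterator at vec 1, offset 88: the whole first vec and 88
	bytes of the second have been consumed.
*/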
void
IORequest::Advance(generic_size_t bySize)
{
	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
		fRemainingBytes - bySize);
	fRemainingBytes -= bySize;
	fTransferSize += bySize;

	generic_io_vec* vecs = fBuffer->Vecs();
	uint32 vecCount = fBuffer->VecCount();
	while (fVecIndex < vecCount
			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
		bySize -= vecs[fVecIndex].length - fVecOffset;
		fVecOffset = 0;
		fVecIndex++;
	}

	fVecOffset += bySize;
}


IORequest*
IORequest::FirstSubRequest()
{
	return dynamic_cast<IORequest*>(fChildren.Head());
}


IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
	if (previous == NULL)
		return NULL;
	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}


void
IORequest::AddOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
	fChildren.Add(operation);
	fPendingChildren++;
}


void
IORequest::RemoveOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	fChildren.Remove(operation);
	operation->SetParent(NULL);
}


status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
	return _CopyData(buffer, offset, size, true);
}


status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
	return _CopyData((void*)buffer, offset, size, false);
}


status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
	if (size == 0)
		return B_OK;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
			", %" B_PRIuGENADDR ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
	if (fBuffer->IsPhysical()) {
		clearFunction = &IORequest::_ClearDataPhysical;
	} else {
		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// clear vector-wise
	while (size > 0) {
		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
			fTeam);
		if (error != B_OK)
			return error;

		size -= toClear;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


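/*!	Copies \a size bytes at device offset \a offset between the request's
	buffer and \a _buffer (into \a _buffer when \a copyIn is \c true). The
	copy strategy is picked per buffer type: physical buffers are copied
	via the physical memory interface, user buffers of other teams via
	their physical pages, and everything else with a plain memcpy (the
	memory is assumed to be locked in that case).
*/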
status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	if (size == 0)
		return B_OK;

	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %"
			B_PRIuSIZE ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
	team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %" B_PRIuSIZE
		", %d)\n", bounceBuffer, external, size, copyIn);
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


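/*!	Copies between \a _bounceBuffer and the user memory of another team.
	Since that team's address space cannot simply be dereferenced here, the
	memory is resolved into physical ranges with get_memory_map_etc() (up
	to 8 entries at a time) and copied chunk-wise via _CopyPhysical().
	Requires the user memory to be locked, so the physical pages cannot go
	away mid-copy.
*/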
/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
	team_id team, bool copyIn)
{
	uint8* bounceBuffer = (uint8*)_bounceBuffer;
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_CopyUser(): Failed to get physical memory for "
				"user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _CopyPhysical(bounceBuffer, entry.address, entry.size, team,
				copyIn);
			if (error != B_OK)
				return error;

			size -= entry.size;
			bounceBuffer += entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
	team_id team)
{
	memset((void*)(addr_t)external, 0, (size_t)size);
	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
	team_id team)
{
	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}


/*static*/ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
	team_id team)
{
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
				"for user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _ClearDataPhysical(entry.address, entry.size, team);
			if (error != B_OK)
				return error;

			size -= entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


void
IORequest::Dump() const
{
	kprintf("io_request at %p\n", this);

	kprintf("  owner:             %p\n", fOwner);
	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  mutex:             %p\n", &fLock);
	kprintf("  IOBuffer:          %p\n", fBuffer);
	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
	kprintf("  team:              %" B_PRId32 "\n", fTeam);
	kprintf("  thread:            %" B_PRId32 "\n", fThread);
	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
	kprintf("  iteration:\n");
	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
	kprintf("  callbacks:\n");
	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
		fIterationCookie);
	kprintf("  children:\n");

	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
	while (iterator.HasNext()) {
		kprintf("    %p\n", iterator.Next());
	}

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_mutex", (addr_t)&fLock);
	set_debug_variable("_buffer", (addr_t)fBuffer);
	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}
1434