xref: /haiku/src/system/kernel/device_manager/IORequest.cpp (revision a5a3b2d9a3d95cbae71eaf371708c73a1780ac0d)
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <debug.h>
#include <heap.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"


//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};


// #pragma mark -


IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


//	#pragma mark -


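// State for IOBuffer::GetNextVirtualVec(): tracks the iteration position in
// the vec array as well as any temporary mapping (area or single physical
// page) that must be undone in FreeVirtualVecCookie().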
struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;
};


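/*!	Allocates an IOBuffer with capacity for \a count vecs. The vec array is
	allocated inline (note the size computation: the struct itself already
	contains one vec). If \a vip is \c true, the buffer is allocated with VIP
	heap priority, so creation also works in low-memory situations.
*/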
IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}


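/*!	Frees the buffer. Note: the method tolerates being invoked on a \c NULL
	object -- technically undefined behavior, but relied upon by callers that
	may hold a \c NULL buffer pointer.
*/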
void
IOBuffer::Delete()
{
	if (this == NULL)
		return;

	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}


void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
	uint32 count, generic_size_t length, uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}


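/*!	Returns the next part of the buffer as a virtually addressable \a vector.
	For virtual buffers this simply iterates through the vecs. For physical
	buffers it first tries to map all vecs into a single kernel area; failing
	that, it falls back to mapping one physical page at a time. The returned
	\a _cookie must be passed to subsequent calls and finally to
	FreeVirtualVecCookie(). Returns \c B_BAD_INDEX when the buffer has been
	fully iterated.
*/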
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		cookie->vec_index = 0;
		cookie->vec_offset = 0;
		cookie->mapped_area = -1;
		cookie->physical_page_handle = NULL;
		cookie->virtual_address = 0;
		_cookie = cookie;
	}

	// put back a page that a previous call may have mapped
	if (cookie->physical_page_handle != NULL) {
// TODO: This check is invalid! The physical page mapper is not required to
// return a non-NULL handle (the generic implementation does not)!
		vm_put_physical_page(cookie->virtual_address,
			cookie->physical_page_handle);
	}

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

// TODO: This is a potential violation of the VIP requirement, since
// vm_map_physical_memory_vecs() allocates memory without special flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else {
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
		}
	}

	// fall back to page-wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

// TODO: This is a potential violation of the VIP requirement, since
// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}


void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);
// TODO: A vm_get_physical_page() may still be unmatched!

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}


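/*!	Locks (wires) the buffer's memory in \a team's address space for the
	duration of the I/O. For read requests (\a isWrite is \c false) the vecs
	are locked with \c B_READ_DEVICE, since the device will write into the
	memory. On failure, all vecs locked so far are unlocked again.
*/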
status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:     %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:       %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:     %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity:   %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:       %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


// #pragma mark -


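/*!	Called after the I/O scheduler has executed the operation. For partial
	writes this drives the read-modify-write cycle: after PHASE_READ_BEGIN or
	PHASE_READ_END has read the partially covered block(s), the user data is
	copied into the bounce buffer and the operation is restarted in the next
	phase, in which case \c false is returned to indicate that the operation
	is not done yet. For reads using a bounce buffer, the data read is copied
	to its final location. Returns \c true when the operation is complete.
*/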
bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");
	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair phase adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair phase adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			SetStatus(error);
	}

	return true;
}


/*!	Prepares the operation for being executed on behalf of \a request.
	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy the data to the bounce buffer segments. The partial begin/end
		// blocks are skipped here; they are copied after their respective
		// read phases.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}


off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}


generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}


uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}


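/*!	Adjusts the DMA buffer's vecs for the current read phase: for
	PHASE_READ_BEGIN the vec containing the end of the first block is
	shortened so that the vec set covers exactly that block; for
	PHASE_READ_END the vec containing the start of the last block is moved
	and shortened accordingly. The modified vec's index and original length
	are saved, so Finish() can repair it before the next phase.
*/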
void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	}
}


status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
	} else {
		return fParent->CopyData(
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
			OriginalOffset(), length);
	}
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
		- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer contiguously
		// (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}


void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:           %p\n", fParent);
	kprintf("  status:           %s\n", strerror(fStatus));
	kprintf("  dma buffer:       %p\n", fDMABuffer);
	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
		fOffset, fOriginalOffset);
	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:  %u\n", fSavedVecIndex);
	kprintf("  saved vec length: %u\n", fSavedVecLength);
	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


// #pragma mark -


IORequest::IORequest()
	:
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	fBuffer->Delete();
	mutex_destroy(&fLock);
}


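/*!	Creates a new IORequest. If \a vip is \c true, the object is allocated
	from the VIP heap, so creation also works under memory pressure.

	A hypothetical caller-side sketch (placeholder variables, error handling
	omitted):

	\code
	IORequest* request = IORequest::Create(false);
	if (request != NULL
		&& request->Init(offset, buffer, length, isWrite, 0) == B_OK) {
		// hand the request to the driver/scheduler, then block until done
		status_t status = request->Wait(0, 0);
	}
	\endcode
*/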
/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}


status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	ASSERT(offset >= 0);

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}


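/*!	Initializes the request to transfer \a length bytes at device offset
	\a offset from/to the given \a vecs, the first of which is consumed
	starting at \a firstVecOffset. The buffer kind (user/kernel,
	virtual/physical) is derived from \a flags and the vec addresses. The
	status is set to 1, which marks the request as "in progress" until it is
	finished or aborted.
*/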
status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
	uint32 flags)
{
	ASSERT(offset >= 0);

	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}


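/*!	Creates a sub request transferring \a length bytes of this request's
	buffer, starting at parent offset \a parentOffset; the sub request itself
	targets device offset \a offset. The relevant portion of the vec array is
	shared with the parent. On success the sub request is added to the list
	of children and returned in \a _subRequest.
*/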
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}


void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}


status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}


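/*!	Called when all children have finished (or the request failed) to notify
	everyone interested. If there is still data left to transfer and an
	iteration callback is set, the request is restarted via that callback
	instead. Otherwise the buffer's memory is unlocked, waiters are woken up,
	the finished callback and the parent (if any) are notified, and the
	request deletes itself if \c B_DELETE_IO_REQUEST is set.
*/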
void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// The iteration failed, which means we're responsible for
			// notifying the request's listeners that it has finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}


/*!	Returns whether this request or any of its ancestors has a finished or
	iteration callback. Used to decide whether NotifyFinished() can be called
	synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, status, this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubRequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}


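/*!	Advances the request's iteration state by \a bySize bytes: the remaining
	byte count is decreased, the transferred size increased, and the vec
	index/offset are moved forward accordingly, e.g. while the request is
	being split into smaller transfers.
*/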
void
IORequest::Advance(generic_size_t bySize)
{
	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
		fRemainingBytes - bySize);
	fRemainingBytes -= bySize;
	fTransferSize += bySize;

	generic_io_vec* vecs = fBuffer->Vecs();
	uint32 vecCount = fBuffer->VecCount();
	while (fVecIndex < vecCount
			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
		bySize -= vecs[fVecIndex].length - fVecOffset;
		fVecOffset = 0;
		fVecIndex++;
	}

	fVecOffset += bySize;
}


IORequest*
IORequest::FirstSubRequest()
{
	return dynamic_cast<IORequest*>(fChildren.Head());
}


IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
	if (previous == NULL)
		return NULL;
	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}


void
IORequest::AddOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
	fChildren.Add(operation);
	fPendingChildren++;
}


void
IORequest::RemoveOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	fChildren.Remove(operation);
	operation->SetParent(NULL);
}


status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
	return _CopyData(buffer, offset, size, true);
}


status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
	return _CopyData((void*)buffer, offset, size, false);
}


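/*!	Zeroes \a size bytes of the request's buffer, starting at device offset
	\a offset. Like _CopyData(), the physical, in-team virtual, or
	foreign-team variant is chosen depending on the buffer kind.
*/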
status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
	if (size == 0)
		return B_OK;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
			", %" B_PRIuGENADDR ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
	if (fBuffer->IsPhysical()) {
		clearFunction = &IORequest::_ClearDataPhysical;
	} else {
		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// clear vector-wise
	while (size > 0) {
		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
			fTeam);
		if (error != B_OK)
			return error;

		size -= toClear;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


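/*!	Copies \a size bytes between \a _buffer and the request's buffer,
	starting at device offset \a offset; \a copyIn denotes the direction
	(into \a _buffer or out of it). Depending on whether the request's buffer
	is physical, belongs to another team, or is plain (locked)
	kernel-accessible memory, the matching low-level copy function is chosen,
	and the copy is then performed vector-wise.
*/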
status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	if (size == 0)
		return B_OK;

	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
			offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
	team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
		bounceBuffer, external, size, copyIn);
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


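/*!	Copies between \a _bounceBuffer and user memory of another team: the
	physical pages backing the user address range are looked up via
	get_memory_map_etc() (in batches of up to 8 entries) and each physical
	run is copied with _CopyPhysical(). The memory is expected to be locked
	(cf. IOBuffer::LockMemory()), so that the physical pages are available.
*/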
/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
	team_id team, bool copyIn)
{
	uint8* bounceBuffer = (uint8*)_bounceBuffer;
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_CopyUser(): Failed to get physical memory for "
				"user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _CopyPhysical(bounceBuffer, entry.address, entry.size, team,
				copyIn);
			if (error != B_OK)
				return error;

			size -= entry.size;
			bounceBuffer += entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
	team_id team)
{
	memset((void*)(addr_t)external, 0, (size_t)size);
	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
	team_id team)
{
	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}


/*static*/ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
	team_id team)
{
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
				"for user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _ClearDataPhysical(entry.address, entry.size, team);
			if (error != B_OK)
				return error;

			size -= entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


void
IORequest::Dump() const
{
	kprintf("io_request at %p\n", this);

	kprintf("  owner:             %p\n", fOwner);
	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  mutex:             %p\n", &fLock);
	kprintf("  IOBuffer:          %p\n", fBuffer);
	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
	kprintf("  team:              %" B_PRId32 "\n", fTeam);
	kprintf("  thread:            %" B_PRId32 "\n", fThread);
	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
	kprintf("  iteration:\n");
	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
	kprintf("  callbacks:\n");
	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
		fIterationCookie);
	kprintf("  children:\n");

	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
	while (iterator.HasNext()) {
		kprintf("    %p\n", iterator.Next());
	}

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_mutex", (addr_t)&fLock);
	set_debug_variable("_buffer", (addr_t)fBuffer);
	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}
1425