xref: /haiku/src/system/kernel/device_manager/dma_resources.cpp (revision 746cac055adc6ac3308c7bc2d29040fb95689cc9)
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "dma_resources.h"

#include <device_manager.h>

#include <kernel.h>
#include <util/AutoLock.h>

#include "io_requests.h"


//#define TRACE_DMA_RESOURCE
#ifdef TRACE_DMA_RESOURCE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


extern device_manager_info gDeviceManagerModule;

const size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;


DMABuffer*
DMABuffer::Create(size_t count)
{
	DMABuffer* buffer = (DMABuffer*)malloc(
		sizeof(DMABuffer) + sizeof(iovec) * (count - 1));
	if (buffer == NULL)
		return NULL;

	buffer->fBounceBuffer = NULL;
		// malloc() doesn't zero the memory; initialize explicitly
	buffer->fVecCount = count;

	return buffer;
}


void
DMABuffer::SetVecCount(uint32 count)
{
	fVecCount = count;
}


void
DMABuffer::AddVec(void* base, size_t size)
{
	iovec& vec = fVecs[fVecCount++];
	vec.iov_base = base;
	vec.iov_len = size;
}


bool
DMABuffer::UsesBounceBufferAt(uint32 index)
{
	if (index >= fVecCount || fBounceBuffer == NULL)
		return false;

	return (addr_t)fVecs[index].iov_base >= fBounceBuffer->physical_address
		&& (addr_t)fVecs[index].iov_base
				< fBounceBuffer->physical_address + fBounceBuffer->size;
}


void
DMABuffer::Dump() const
{
	kprintf("DMABuffer at %p\n", this);

	if (fBounceBuffer != NULL) {
		kprintf("  bounce buffer:      %p (physical %#lx)\n",
			fBounceBuffer->address, fBounceBuffer->physical_address);
		kprintf("  bounce buffer size: %lu\n", fBounceBuffer->size);
	} else
		kprintf("  no bounce buffer\n");
	kprintf("  vecs:               %lu\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%lu] %p, %lu\n", i, fVecs[i].iov_base, fVecs[i].iov_len);
	}
}


//	#pragma mark -


DMAResource::DMAResource()
{
	mutex_init(&fLock, "dma resource");
}


DMAResource::~DMAResource()
{
	mutex_destroy(&fLock);
	free(fScratchVecs);

	// TODO: Delete DMABuffers and BounceBuffers!
}


status_t
DMAResource::Init(device_node* node, size_t blockSize, uint32 bufferCount,
	uint32 bounceBufferCount)
{
	dma_restrictions restrictions;
	memset(&restrictions, 0, sizeof(dma_restrictions));

	// TODO: add DMA attributes instead of reusing block_io's

	uint32 value;
	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_ALIGNMENT, &value, true) == B_OK)
		restrictions.alignment = value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_BOUNDARY, &value, true) == B_OK)
		restrictions.boundary = value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK)
		restrictions.max_segment_size = value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK)
		restrictions.max_transfer_size = value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK)
		restrictions.max_segment_count = value;

	return Init(restrictions, blockSize, bufferCount, bounceBufferCount);
}
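

// Usage sketch: how a driver node might publish the attributes that the
// Init() variant above reads. As the "value + 1" conversions show,
// B_DMA_ALIGNMENT and B_DMA_BOUNDARY are masks, not sizes. All concrete
// values below are hypothetical; a real driver would pass such an array to
// the device manager when registering its node.
#if 0
static const device_attr kDMAAttributes[] = {
	{ B_DMA_ALIGNMENT, B_UINT32_TYPE, { ui32: 511 }},
		// segments must be 512 byte aligned
	{ B_DMA_BOUNDARY, B_UINT32_TYPE, { ui32: 0xffff }},
		// segments must not cross a 64 KB boundary
	{ B_DMA_MAX_SEGMENT_BLOCKS, B_UINT32_TYPE, { ui32: 128 }},
	{ B_DMA_MAX_TRANSFER_BLOCKS, B_UINT32_TYPE, { ui32: 256 }},
	{ B_DMA_MAX_SEGMENT_COUNT, B_UINT32_TYPE, { ui32: 32 }},
	{ NULL }
};
#endif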


status_t
DMAResource::Init(const dma_restrictions& restrictions, size_t blockSize,
	uint32 bufferCount, uint32 bounceBufferCount)
{
	fRestrictions = restrictions;
	fBlockSize = blockSize == 0 ? 1 : blockSize;
	fBufferCount = bufferCount;
	fBounceBufferCount = bounceBufferCount;
	fBounceBufferSize = 0;

	if (fRestrictions.high_address == 0)
		fRestrictions.high_address = ~(addr_t)0;
	if (fRestrictions.max_segment_count == 0)
		fRestrictions.max_segment_count = 16;
	if (fRestrictions.alignment == 0)
		fRestrictions.alignment = 1;
	if (fRestrictions.max_transfer_size == 0)
		fRestrictions.max_transfer_size = ~(size_t)0;
	if (fRestrictions.max_segment_size == 0)
		fRestrictions.max_segment_size = ~(size_t)0;

	if (_NeedsBoundsBuffers()) {
		fBounceBufferSize = fRestrictions.max_segment_size
			* min_c(fRestrictions.max_segment_count, 4);
		if (fBounceBufferSize > kMaxBounceBufferSize)
			fBounceBufferSize = kMaxBounceBufferSize;
		TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
			fBounceBufferSize);
	}

	dprintf("DMAResource@%p: low/high %lx/%lx, max segment count %lu, align "
		"%lu, boundary %lu, max transfer %lu, max segment size %lu\n", this,
		fRestrictions.low_address, fRestrictions.high_address,
		fRestrictions.max_segment_count, fRestrictions.alignment,
		fRestrictions.boundary, fRestrictions.max_transfer_size,
		fRestrictions.max_segment_size);

	fScratchVecs = (iovec*)malloc(
		sizeof(iovec) * fRestrictions.max_segment_count);
	if (fScratchVecs == NULL)
		return B_NO_MEMORY;

	for (size_t i = 0; i < fBufferCount; i++) {
		DMABuffer* buffer;
		status_t error = CreateBuffer(&buffer);
		if (error != B_OK)
			return error;

		fDMABuffers.Add(buffer);
	}

	// TODO: create bounce buffers in as few areas as feasible
	for (size_t i = 0; i < fBounceBufferCount; i++) {
		DMABounceBuffer* buffer;
		status_t error = CreateBounceBuffer(&buffer);
		if (error != B_OK)
			return error;

		fBounceBuffers.Add(buffer);
	}

	return B_OK;
}
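

// Usage sketch: initializing a DMAResource directly from a dma_restrictions
// struct, as a driver without device attributes might do. All limits below
// are made-up example values.
#if 0
status_t
init_my_dma_resource(DMAResource*& _resource)
{
	dma_restrictions restrictions;
	memset(&restrictions, 0, sizeof(dma_restrictions));
	restrictions.high_address = 0x1000000;	// device can only address 16 MB
	restrictions.alignment = 512;			// segments 512 byte aligned
	restrictions.boundary = 0x10000;		// don't cross 64 KB boundaries
	restrictions.max_segment_count = 16;
	restrictions.max_transfer_size = 128 * 1024;

	DMAResource* resource = new(std::nothrow) DMAResource;
	if (resource == NULL)
		return B_NO_MEMORY;

	// 512 byte blocks, 4 DMA buffers, 4 bounce buffers
	status_t status = resource->Init(restrictions, 512, 4, 4);
	if (status != B_OK) {
		delete resource;
		return status;
	}

	_resource = resource;
	return B_OK;
}
#endif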


status_t
DMAResource::CreateBuffer(DMABuffer** _buffer)
{
	DMABuffer* buffer = DMABuffer::Create(fRestrictions.max_segment_count);
	if (buffer == NULL)
		return B_NO_MEMORY;

	*_buffer = buffer;
	return B_OK;
}


status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	addr_t physicalBase = 0;
	area_id area = -1;
	size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	if (fRestrictions.alignment > B_PAGE_SIZE) {
		dprintf("dma buffer restrictions not yet implemented: alignment "
			"%lu\n", fRestrictions.alignment);
	}
	if (fRestrictions.boundary > B_PAGE_SIZE) {
		dprintf("dma buffer restrictions not yet implemented: boundary "
			"%lu\n", fRestrictions.boundary);
	}

	bounceBuffer = (void*)fRestrictions.low_address;
	// TODO: We also need to enforce the boundary restrictions.
	area = create_area("dma buffer", &bounceBuffer, B_PHYSICAL_BASE_ADDRESS,
		size, B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = (addr_t)entry.address;

	if (fRestrictions.high_address < physicalBase + size) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}


inline void
DMAResource::_RestrictBoundaryAndSegmentSize(addr_t base, addr_t& length)
{
	if (length > fRestrictions.max_segment_size)
		length = fRestrictions.max_segment_size;
	if (fRestrictions.boundary > 0) {
		addr_t baseBoundary = base / fRestrictions.boundary;
		if (baseBoundary
				!= (base + (length - 1)) / fRestrictions.boundary) {
			length = (baseBoundary + 1) * fRestrictions.boundary - base;
		}
	}
}
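

// Worked example: with boundary = 0x10000, a segment at base 0x2f000 with
// length 0x3000 would cross the boundary at 0x30000: baseBoundary is
// 0x2f000 / 0x10000 = 2, but (0x2f000 + 0x2fff) / 0x10000 = 3. The length
// is therefore cut to (2 + 1) * 0x10000 - 0x2f000 = 0x1000, so the segment
// ends exactly at the boundary; the remaining 0x2000 bytes end up in the
// next vec.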


void
DMAResource::_CutBuffer(DMABuffer& buffer, addr_t& physicalBounceBuffer,
	size_t& bounceLeft, size_t toCut)
{
	int32 vecCount = buffer.VecCount();
	for (int32 i = vecCount - 1; toCut > 0 && i >= 0; i--) {
		iovec& vec = buffer.VecAt(i);
		size_t length = vec.iov_len;
		bool inBounceBuffer = buffer.UsesBounceBufferAt(i);

		if (length <= toCut) {
			vecCount--;
			toCut -= length;

			if (inBounceBuffer) {
				bounceLeft += length;
				physicalBounceBuffer -= length;
			}
		} else {
			vec.iov_len -= toCut;

			if (inBounceBuffer) {
				bounceLeft += toCut;
				physicalBounceBuffer -= toCut;
			}
			break;
		}
	}

	buffer.SetVecCount(vecCount);
}
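

// Worked example: if the buffer's last vec is a 2048 byte bounce buffer
// segment and toCut is 512, only that vec is shortened, to 1536 bytes, and
// the 512 bytes are handed back to the bounce buffer accounting via
// bounceLeft and physicalBounceBuffer. Had toCut been larger than the last
// vec, whole vecs would have been dropped from the tail until the remainder
// fit.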


/*!	Adds \a length bytes from the bounce buffer to the DMABuffer \a buffer.
	Takes care of boundary and segment restrictions. \a length must be aligned.
	If \a fixedLength is requested, this function will fail if it cannot
	satisfy the request.

	\return 0 if the request cannot be satisfied. There could have been some
		additions to the DMA buffer, and you will need to cut them back.
	TODO: is that what we want here?
	\return >0 the number of bytes added to the buffer.
*/
size_t
DMAResource::_AddBounceBuffer(DMABuffer& buffer, addr_t& physicalBounceBuffer,
	size_t& bounceLeft, size_t length, bool fixedLength)
{
	if (bounceLeft < length) {
		if (fixedLength)
			return 0;

		length = bounceLeft;
	}

	size_t bounceUsed = 0;

	uint32 vecCount = buffer.VecCount();
	if (vecCount > 0) {
		// see if we can join the bounce buffer with the previously last vec
		iovec& vec = buffer.VecAt(vecCount - 1);
		addr_t vecBase = (addr_t)vec.iov_base;
		size_t vecLength = vec.iov_len;

		if (vecBase + vecLength == physicalBounceBuffer) {
			vecLength += length;
			_RestrictBoundaryAndSegmentSize(vecBase, vecLength);

			size_t lengthDiff = vecLength - vec.iov_len;
			length -= lengthDiff;

			physicalBounceBuffer += lengthDiff;
			bounceLeft -= lengthDiff;
			bounceUsed += lengthDiff;

			vec.iov_len = vecLength;
		}
	}

	while (length > 0) {
		// We need to add another bounce vec

		if (vecCount == fRestrictions.max_segment_count)
			return fixedLength ? 0 : bounceUsed;

		addr_t vecLength = length;
		_RestrictBoundaryAndSegmentSize(physicalBounceBuffer, vecLength);

		buffer.AddVec((void*)physicalBounceBuffer, vecLength);
		vecCount++;

		physicalBounceBuffer += vecLength;
		bounceLeft -= vecLength;
		bounceUsed += vecLength;
		length -= vecLength;
	}

	return bounceUsed;
}
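

// Worked example: with boundary = 0x1000 and an empty buffer, a call with
// physicalBounceBuffer = 0x10000 and length = 0x2800 adds the vecs
// (0x10000, 0x1000), (0x11000, 0x1000), and (0x12000, 0x800) -- each cut at
// a boundary by _RestrictBoundaryAndSegmentSize() -- and returns 0x2800,
// provided max_segment_count and bounceLeft allow it.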


status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
	size_t maxOperationLength)
{
	IOBuffer* buffer = request->Buffer();
	off_t originalOffset = request->Offset() + request->Length()
		- request->RemainingBytes();
	off_t offset = originalOffset;
	size_t partialBegin = offset & (fBlockSize - 1);

	// current iteration state
	uint32 vecIndex = request->VecIndex();
	uint32 vecOffset = request->VecOffset();
	size_t totalLength = min_c(request->RemainingBytes(),
		fRestrictions.max_transfer_size);

	if (maxOperationLength > 0
		&& maxOperationLength < totalLength + partialBegin) {
		totalLength = maxOperationLength - partialBegin;
	}

	MutexLocker locker(fLock);

	DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
	if (dmaBuffer == NULL)
		return B_BUSY;

	dmaBuffer->SetVecCount(0);

	iovec* vecs = NULL;
	uint32 segmentCount = 0;

	TRACE("  offset %Ld, remaining size: %lu, block size %lu -> partial: %lu\n",
		offset, request->RemainingBytes(), fBlockSize, partialBegin);

	if (buffer->IsVirtual()) {
		// Unless we need the bounce buffer anyway, we have to translate the
		// virtual addresses to physical addresses, so we can check the DMA
		// restrictions.
		TRACE("  buffer is virtual %s\n", buffer->IsUser() ? "user" : "kernel");
		// TODO: !partialOperation || totalLength >= fBlockSize
		// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
		if (true) {
			size_t transferLeft = totalLength;
			vecs = fScratchVecs;

			TRACE("  create physical map (for %ld vecs)\n", buffer->VecCount());
			for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
				iovec& vec = buffer->VecAt(i);
				addr_t base = (addr_t)vec.iov_base + vecOffset;
				size_t size = vec.iov_len - vecOffset;
				vecOffset = 0;
				if (size > transferLeft)
					size = transferLeft;

				while (size > 0 && segmentCount
						< fRestrictions.max_segment_count) {
					physical_entry entry;
					uint32 count = 1;
					get_memory_map_etc(request->Team(), (void*)base, size,
						&entry, &count);

					vecs[segmentCount].iov_base = entry.address;
					vecs[segmentCount].iov_len = entry.size;

					transferLeft -= entry.size;
					base += entry.size;
					size -= entry.size;
					segmentCount++;
				}

				if (transferLeft == 0)
					break;
			}

			totalLength -= transferLeft;
		}

		vecIndex = 0;
		vecOffset = 0;
	} else {
		// We already have physical addresses.
		locker.Unlock();
		vecs = buffer->Vecs();
		segmentCount = min_c(buffer->VecCount() - vecIndex,
			fRestrictions.max_segment_count);
	}

#ifdef TRACE_DMA_RESOURCE
	TRACE("  physical count %lu\n", segmentCount);
	for (uint32 i = 0; i < segmentCount; i++) {
		TRACE("    [%lu] %p, %lu\n", i, vecs[vecIndex + i].iov_base,
			vecs[vecIndex + i].iov_len);
	}
#endif

	// check alignment, boundaries, etc. and set vecs in DMA buffer

	// Fetch a bounce buffer we can use for the DMABuffer.
	// TODO: We should do that lazily when needed!
	DMABounceBuffer* bounceBuffer = NULL;
	if (_NeedsBoundsBuffers()) {
		bounceBuffer = fBounceBuffers.Head();
		if (bounceBuffer == NULL) {
			// Put the DMA buffer back, so it isn't lost to the resource.
			locker.Lock();
				// a no-op, if we still hold the lock
			fDMABuffers.Add(dmaBuffer);
			return B_BUSY;
		}
	}
	dmaBuffer->SetBounceBuffer(bounceBuffer);

	size_t dmaLength = 0;
	addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBufferAddress();
	size_t bounceLeft = fBounceBufferSize;
	size_t transferLeft = totalLength;

	// If the offset isn't block-aligned, use the bounce buffer to bridge the
	// gap to the start of the vec.
	if (partialBegin > 0) {
		size_t length;
		if (request->IsWrite()) {
			// we always need to read in a whole block for the partial write
			length = fBlockSize;
		} else {
			length = (partialBegin + fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);
		}

		if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
				length, true) == 0) {
			TRACE("  adding partial begin failed, length %lu!\n", length);
			return B_BAD_VALUE;
		}

		dmaLength += length;

		size_t transferred = length - partialBegin;
		vecOffset += transferred;
		offset -= partialBegin;

		if (transferLeft > transferred)
			transferLeft -= transferred;
		else
			transferLeft = 0;

		TRACE("  partial begin, using bounce buffer: offset: %lld, length: "
			"%lu\n", offset, length);
	}

	for (uint32 i = vecIndex;
			i < vecIndex + segmentCount && transferLeft > 0;) {
		if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
			break;

		const iovec& vec = vecs[i];
		if (vec.iov_len <= vecOffset) {
			vecOffset -= vec.iov_len;
			i++;
			continue;
		}

		addr_t base = (addr_t)vec.iov_base + vecOffset;
		size_t maxLength = vec.iov_len - vecOffset;
		if (maxLength > transferLeft)
			maxLength = transferLeft;
		size_t length = maxLength;

		// Cut the vec according to transfer size, segment size, and boundary.

		if (dmaLength + length > fRestrictions.max_transfer_size) {
			length = fRestrictions.max_transfer_size - dmaLength;
			TRACE("  vec %lu: restricting length to %lu due to transfer size "
				"limit\n", i, length);
		}
		_RestrictBoundaryAndSegmentSize(base, length);

		size_t useBounceBufferSize = 0;

		// Check low address: use bounce buffer for range to low address.
		// Check alignment: if not aligned, use bounce buffer for complete vec.
		if (base < fRestrictions.low_address) {
			useBounceBufferSize = fRestrictions.low_address - base;
			TRACE("  vec %lu: below low address, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		} else if (base & (fRestrictions.alignment - 1)) {
			useBounceBufferSize = length;
			TRACE("  vec %lu: misalignment, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		// Enforce high address restriction
		if (base > fRestrictions.high_address)
			useBounceBufferSize = length;
		else if (base + length > fRestrictions.high_address)
			length = fRestrictions.high_address - base;

		// Align length as well
		if (useBounceBufferSize == 0)
			length &= ~(fRestrictions.alignment - 1);

		// If length is 0, use bounce buffer for complete vec.
		if (length == 0) {
			length = maxLength;
			useBounceBufferSize = length;
			TRACE("  vec %lu: 0 length, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		if (useBounceBufferSize > 0) {
			// alignment could still be wrong (we round up here)
			useBounceBufferSize = (useBounceBufferSize
				+ fRestrictions.alignment - 1) & ~(fRestrictions.alignment - 1);

			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, useBounceBufferSize, false);
			if (length == 0) {
				TRACE("  vec %lu: out of bounce buffer space\n", i);
				// We don't have any bounce buffer space left, so we need to
				// move this request to the next I/O operation.
				break;
			}
			TRACE("  vec %lu: final bounce length: %lu\n", i, length);
		} else {
			TRACE("  vec %lu: final length restriction: %lu\n", i, length);
			dmaBuffer->AddVec((void*)base, length);
		}

		dmaLength += length;
		vecOffset += length;
		transferLeft -= min_c(length, transferLeft);
	}

	// If we're writing partially, we always need to have a block sized bounce
	// buffer (or else we would overwrite memory to be written on the read in
	// the first phase).
	off_t requestEnd = request->Offset() + request->Length();
	if (request->IsWrite()) {
		size_t diff = dmaLength & (fBlockSize - 1);

		// If the transfer length is block aligned and we're writing past the
		// end of the given data, we still have to check whether the last vec
		// is a bounce buffer segment shorter than the block size. If so, we
		// have to cut back the complete block and use a bounce buffer for it
		// entirely.
		if (diff == 0 && offset + dmaLength > requestEnd) {
			const iovec& dmaVec = dmaBuffer->VecAt(dmaBuffer->VecCount() - 1);
			ASSERT((addr_t)dmaVec.iov_base
					>= dmaBuffer->PhysicalBounceBufferAddress()
				&& (addr_t)dmaVec.iov_base
					< dmaBuffer->PhysicalBounceBufferAddress()
						+ fBounceBufferSize);
				// We can be certain that the last vec is a bounce buffer vec,
				// since otherwise the DMA buffer couldn't exceed the end of the
				// request data.
			if (dmaVec.iov_len < fBlockSize)
				diff = fBlockSize;
		}

		if (diff != 0) {
			// Not yet block aligned -- cut back to the previous block and add
			// a block-sized bounce buffer segment.
			TRACE("  partial end write: %lu, diff %lu\n", dmaLength, diff);

			_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
			dmaLength -= diff;

			if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
					bounceLeft, fBlockSize, true) == 0) {
				// If we cannot write anything, we can't process the request at
				// all.
				TRACE("  adding bounce buffer failed!!!\n");
				if (dmaLength == 0)
					return B_BAD_VALUE;
			} else
				dmaLength += fBlockSize;
		}
	}

	// If the total length is not block aligned, use the bounce buffer for
	// padding (read case only).
	while ((dmaLength & (fBlockSize - 1)) != 0) {
		TRACE("  dmaLength not block aligned: %lu\n", dmaLength);
		size_t length = (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);

		// If the total length would exceed the max transfer size, the segment
		// count the max segment count, or the remaining bounce buffer space,
		// truncate instead.
		// TODO: sometimes we can replace the last vec with the bounce buffer
		// to let it match the restrictions.
		if (length > fRestrictions.max_transfer_size
			|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
			|| bounceLeft < length - dmaLength) {
			// cut off the unaligned part of the DMA length
			TRACE("  can't align length due to max transfer size, segment "
				"count restrictions, or lacking bounce buffer space\n");
			size_t toCut = dmaLength
				& (max_c(fBlockSize, fRestrictions.alignment) - 1);
			dmaLength -= toCut;
			if (dmaLength == 0) {
				// This can only happen when we have too many small segments
				// and hit the max segment count. In this case we just use the
				// bounce buffer for as much as possible of the total length.
				dmaBuffer->SetVecCount(0);
				addr_t base = dmaBuffer->PhysicalBounceBufferAddress();
				dmaLength = min_c(totalLength, fBounceBufferSize)
					& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
				_RestrictBoundaryAndSegmentSize(base, dmaLength);
				dmaBuffer->AddVec((void*)base, dmaLength);

				physicalBounceBuffer = base + dmaLength;
				bounceLeft = fBounceBufferSize - dmaLength;
			} else {
				_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, toCut);
			}
		} else {
			TRACE("  adding %lu bytes final bounce buffer\n",
				length - dmaLength);
			length -= dmaLength;
			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, length, true);
			if (length == 0)
				panic("don't do this to me!");
			dmaLength += length;
		}
	}

	operation->SetBuffer(dmaBuffer);
	operation->SetBlockSize(fBlockSize);
	operation->SetOriginalRange(originalOffset,
		min_c(offset + dmaLength, requestEnd) - originalOffset);
	operation->SetRange(offset, dmaLength);
	operation->SetPartial(partialBegin != 0, offset + dmaLength > requestEnd);

	// If the operation uses the bounce buffer, claim it for this operation;
	// otherwise detach it from the DMA buffer again, so it stays available.
	operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
	if (operation->UsesBounceBuffer())
		fBounceBuffers.RemoveHead();
	else
		dmaBuffer->SetBounceBuffer(NULL);

	status_t error = operation->Prepare(request);
	if (error != B_OK)
		return error;

	request->Advance(operation->OriginalLength());

	return B_OK;
}
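

// Usage sketch: the life cycle of a translated operation, as a driver's I/O
// loop might drive it. The hypothetical execute_transfer() stands in for
// whatever actually programs the hardware, and IOOperation::Buffer() is
// assumed to return the DMABuffer set by TranslateNext(); a real scheduler
// would also wait and retry on B_BUSY instead of bailing out.
#if 0
static status_t
process_request(DMAResource& resource, IORequest* request)
{
	while (request->RemainingBytes() > 0) {
		IOOperation operation;
		status_t status = resource.TranslateNext(request, &operation, 0);
			// 0: no additional limit on the operation length
		if (status != B_OK)
			return status;

		// Program the hardware with the vecs of the operation's DMABuffer,
		// honoring operation.UsesBounceBuffer() for the copy-in/copy-out.
		status = execute_transfer(&operation);

		resource.RecycleBuffer(operation.Buffer());
		if (status != B_OK)
			return status;
	}
	return B_OK;
}
#endif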


void
DMAResource::RecycleBuffer(DMABuffer* buffer)
{
	if (buffer == NULL)
		return;

	MutexLocker _(fLock);
	fDMABuffers.Add(buffer);
	if (buffer->BounceBuffer() != NULL) {
		fBounceBuffers.Add(buffer->BounceBuffer());
		buffer->SetBounceBuffer(NULL);
	}
}


bool
DMAResource::_NeedsBoundsBuffers() const
{
	return fRestrictions.alignment > 1
		|| fRestrictions.low_address != 0
		|| fRestrictions.high_address != ~(addr_t)0
		|| fBlockSize > 1;
}


#if 0


status_t
create_dma_resource(restrictions)
{
	// Restrictions are: transfer size, address space, alignment,
	// segment min/max size, num segments
}


void
delete_dma_resource(resource)
{
}


dma_buffer_alloc(resource, size)
{
}


dma_buffer_free(buffer)
{
	// Allocates or frees memory in that DMA buffer.
}

#endif	// 0
783