/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "dma_resources.h"

#include <device_manager.h>

#include <kernel.h>
#include <util/AutoLock.h>
#include <vm/vm.h>

#include "IORequest.h"


//#define TRACE_DMA_RESOURCE
#ifdef TRACE_DMA_RESOURCE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


extern device_manager_info gDeviceManagerModule;

const phys_size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;


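/*!	Creates a DMABuffer with room for up to \a count vecs. The vec array is
	allocated inline with the object, which is why the allocation size is
	computed as the struct size plus \a count - 1 additional vecs.
*/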
DMABuffer*
DMABuffer::Create(size_t count)
{
	DMABuffer* buffer = (DMABuffer*)malloc(
		sizeof(DMABuffer) + sizeof(generic_io_vec) * (count - 1));
	if (buffer == NULL)
		return NULL;

	buffer->fVecCount = count;

	return buffer;
}


void
DMABuffer::SetVecCount(uint32 count)
{
	fVecCount = count;
}


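/*!	Appends a vec with the given \a base and \a size. The caller must ensure
	that the vec count does not exceed the count the buffer was created with.
*/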
void
DMABuffer::AddVec(generic_addr_t base, generic_size_t size)
{
	generic_io_vec& vec = fVecs[fVecCount++];
	vec.base = base;
	vec.length = size;
}


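/*!	Returns whether the vec at \a index lies within the physical range of
	the attached bounce buffer.
*/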
bool
DMABuffer::UsesBounceBufferAt(uint32 index)
{
	if (index >= fVecCount || fBounceBuffer == NULL)
		return false;

	return fVecs[index].base >= fBounceBuffer->physical_address
		&& fVecs[index].base
			< fBounceBuffer->physical_address + fBounceBuffer->size;
}


void
DMABuffer::Dump() const
{
	kprintf("DMABuffer at %p\n", this);

	// A buffer doesn't necessarily have a bounce buffer attached.
	if (fBounceBuffer != NULL) {
		kprintf("  bounce buffer: %p (physical %#" B_PRIxPHYSADDR ")\n",
			fBounceBuffer->address, fBounceBuffer->physical_address);
		kprintf("  bounce buffer size: %" B_PRIxPHYSADDR "\n",
			fBounceBuffer->size);
	}
	kprintf("  vecs: %" B_PRIu32 "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


// #pragma mark -


DMAResource::DMAResource()
	:
	fBlockSize(0),
	fScratchVecs(NULL)
{
	mutex_init(&fLock, "dma resource");
}


DMAResource::~DMAResource()
{
	mutex_lock(&fLock);
	mutex_destroy(&fLock);
	free(fScratchVecs);

	// TODO: Delete DMABuffers and BounceBuffers!
}


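/*!	Initializes the resource from the DMA restrictions published as B_DMA_*
	attributes on the given device node. Note that the alignment and boundary
	attributes are stored as masks (value = size - 1), hence the "+ 1" below.
*/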
status_t
DMAResource::Init(device_node* node, generic_size_t blockSize,
	uint32 bufferCount, uint32 bounceBufferCount)
{
	dma_restrictions restrictions;
	memset(&restrictions, 0, sizeof(dma_restrictions));

	// TODO: add DMA attributes instead of reusing block_io's

	uint32 value;
	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_ALIGNMENT, &value, true) == B_OK)
		restrictions.alignment = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_BOUNDARY, &value, true) == B_OK)
		restrictions.boundary = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK)
		restrictions.max_segment_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK)
		restrictions.max_transfer_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK)
		restrictions.max_segment_count = value;

	uint64 value64;
	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_LOW_ADDRESS, &value64, true) == B_OK) {
		restrictions.low_address = value64;
	}

	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_HIGH_ADDRESS, &value64, true) == B_OK) {
		restrictions.high_address = value64;
	}

	return Init(restrictions, blockSize, bufferCount, bounceBufferCount);
}


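/*!	Initializes the resource from explicit \a restrictions. Fields that are 0
	are assigned defaults, and the scratch vecs, DMA buffers, and bounce
	buffers are allocated up front.
*/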
status_t
DMAResource::Init(const dma_restrictions& restrictions,
	generic_size_t blockSize, uint32 bufferCount, uint32 bounceBufferCount)
{
	ASSERT(restrictions.alignment <= blockSize);
	ASSERT(fBlockSize == 0);

	fRestrictions = restrictions;
	fBlockSize = blockSize == 0 ? 1 : blockSize;
	fBufferCount = bufferCount;
	fBounceBufferCount = bounceBufferCount;
	fBounceBufferSize = 0;

	if (fRestrictions.high_address == 0)
		fRestrictions.high_address = ~(generic_addr_t)0;
	if (fRestrictions.max_segment_count == 0)
		fRestrictions.max_segment_count = 16;
	if (fRestrictions.alignment == 0)
		fRestrictions.alignment = 1;
	if (fRestrictions.max_transfer_size == 0)
		fRestrictions.max_transfer_size = ~(generic_size_t)0;
	if (fRestrictions.max_segment_size == 0)
		fRestrictions.max_segment_size = ~(generic_size_t)0;

	if (_NeedsBoundsBuffers()) {
		fBounceBufferSize = fRestrictions.max_segment_size
			* min_c(fRestrictions.max_segment_count, 4);
		if (fBounceBufferSize > kMaxBounceBufferSize)
			fBounceBufferSize = kMaxBounceBufferSize;
		TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
			fBounceBufferSize);
	}

	dprintf("DMAResource@%p: low/high %" B_PRIxGENADDR "/%" B_PRIxGENADDR
		", max segment count %" B_PRIu32 ", align %" B_PRIuGENADDR ", "
		"boundary %" B_PRIuGENADDR ", max transfer %" B_PRIuGENADDR
		", max segment size %" B_PRIuGENADDR "\n", this,
		fRestrictions.low_address, fRestrictions.high_address,
		fRestrictions.max_segment_count, fRestrictions.alignment,
		fRestrictions.boundary, fRestrictions.max_transfer_size,
		fRestrictions.max_segment_size);

	fScratchVecs = (generic_io_vec*)malloc(
		sizeof(generic_io_vec) * fRestrictions.max_segment_count);
	if (fScratchVecs == NULL)
		return B_NO_MEMORY;

	for (size_t i = 0; i < fBufferCount; i++) {
		DMABuffer* buffer;
		status_t error = CreateBuffer(&buffer);
		if (error != B_OK)
			return error;

		fDMABuffers.Add(buffer);
	}

	// TODO: create bounce buffers in as few areas as feasible
	for (size_t i = 0; i < fBounceBufferCount; i++) {
		DMABounceBuffer* buffer;
		status_t error = CreateBounceBuffer(&buffer);
		if (error != B_OK)
			return error;

		fBounceBuffers.Add(buffer);
	}

	return B_OK;
}

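
// Typical driver-side setup (a minimal sketch; the block size and buffer
// counts are made-up values, and "node" stands for the driver's
// device_node):
//
//	DMAResource* resource = new(std::nothrow) DMAResource;
//	if (resource == NULL)
//		return B_NO_MEMORY;
//
//	// 512-byte blocks, 4 DMA buffers, 4 bounce buffers
//	status_t status = resource->Init(node, 512, 4, 4);
//	if (status != B_OK)
//		return status;
//
//	// An I/O scheduler can then use TranslateNext() to split each IORequest
//	// into IOOperations that satisfy the hardware's DMA restrictions.
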

status_t
DMAResource::CreateBuffer(DMABuffer** _buffer)
{
	DMABuffer* buffer = DMABuffer::Create(fRestrictions.max_segment_count);
	if (buffer == NULL)
		return B_NO_MEMORY;

	*_buffer = buffer;
	return B_OK;
}


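/*!	Allocates a physically contiguous bounce buffer that satisfies the
	resource's address, alignment, and boundary restrictions.
*/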
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
		&physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;

	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}


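/*!	Clamps \a length so that the segment starting at \a base neither exceeds
	the maximum segment size nor crosses a segment boundary.
*/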
inline void
DMAResource::_RestrictBoundaryAndSegmentSize(generic_addr_t base,
	generic_addr_t& length)
{
	if (length > fRestrictions.max_segment_size)
		length = fRestrictions.max_segment_size;
	if (fRestrictions.boundary > 0) {
		generic_addr_t baseBoundary = base / fRestrictions.boundary;
		if (baseBoundary
				!= (base + (length - 1)) / fRestrictions.boundary) {
			length = (baseBoundary + 1) * fRestrictions.boundary - base;
		}
	}
}


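/*!	Removes the last \a toCut bytes from \a buffer, dropping or shortening
	vecs from the end. Bytes that were backed by the bounce buffer are
	returned to it via \a physicalBounceBuffer and \a bounceLeft.
*/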
void
DMAResource::_CutBuffer(DMABuffer& buffer, phys_addr_t& physicalBounceBuffer,
	phys_size_t& bounceLeft, generic_size_t toCut)
{
	int32 vecCount = buffer.VecCount();
	for (int32 i = vecCount - 1; toCut > 0 && i >= 0; i--) {
		generic_io_vec& vec = buffer.VecAt(i);
		generic_size_t length = vec.length;
		bool inBounceBuffer = buffer.UsesBounceBufferAt(i);

		if (length <= toCut) {
			vecCount--;
			toCut -= length;

			if (inBounceBuffer) {
				bounceLeft += length;
				physicalBounceBuffer -= length;
			}
		} else {
			vec.length -= toCut;

			if (inBounceBuffer) {
				bounceLeft += toCut;
				physicalBounceBuffer -= toCut;
			}
			break;
		}
	}

	buffer.SetVecCount(vecCount);
}


/*!	Adds \a length bytes from the bounce buffer to the DMABuffer \a buffer.
	Takes care of boundary and segment size restrictions. \a length must be
	aligned.
	If \a fixedLength is requested, this function will fail if it cannot
	satisfy the request.

	\return 0 if the request cannot be satisfied. There could have been some
		additions to the DMA buffer, and you will need to cut them back.
	TODO: is that what we want here?
	\return >0 the number of bytes added to the buffer.
*/
phys_size_t
DMAResource::_AddBounceBuffer(DMABuffer& buffer,
	phys_addr_t& physicalBounceBuffer, phys_size_t& bounceLeft,
	generic_size_t length, bool fixedLength)
{
	if (bounceLeft < length) {
		if (fixedLength)
			return 0;

		length = bounceLeft;
	}

	phys_size_t bounceUsed = 0;

	uint32 vecCount = buffer.VecCount();
	if (vecCount > 0) {
		// see if we can join the bounce buffer with the previously last vec
		generic_io_vec& vec = buffer.VecAt(vecCount - 1);
		generic_addr_t vecBase = vec.base;
		generic_size_t vecLength = vec.length;

		if (vecBase + vecLength == physicalBounceBuffer) {
			vecLength += length;
			_RestrictBoundaryAndSegmentSize(vecBase, vecLength);

			generic_size_t lengthDiff = vecLength - vec.length;
			length -= lengthDiff;

			physicalBounceBuffer += lengthDiff;
			bounceLeft -= lengthDiff;
			bounceUsed += lengthDiff;

			vec.length = vecLength;
		}
	}

	while (length > 0) {
		// We need to add another bounce vec

		if (vecCount == fRestrictions.max_segment_count)
			return fixedLength ? 0 : bounceUsed;

		generic_addr_t vecLength = length;
		_RestrictBoundaryAndSegmentSize(physicalBounceBuffer, vecLength);

		buffer.AddVec(physicalBounceBuffer, vecLength);
		vecCount++;

		physicalBounceBuffer += vecLength;
		bounceLeft -= vecLength;
		bounceUsed += vecLength;
		length -= vecLength;
	}

	return bounceUsed;
}


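/*!	Translates the next chunk of \a request into \a operation, observing the
	DMA restrictions: the request memory is mapped to physical vecs, and the
	bounce buffer is used for any part that is misaligned, outside the
	supported address range, or not a multiple of the block size. If
	\a maxOperationLength is > 0, at most that many bytes are translated at
	once. On success the request is advanced by the operation's original
	length.
*/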
status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
	generic_size_t maxOperationLength)
{
	IOBuffer* buffer = request->Buffer();
	off_t originalOffset = request->Offset() + request->Length()
		- request->RemainingBytes();
	off_t offset = originalOffset;
	generic_size_t partialBegin = offset & (fBlockSize - 1);

	// current iteration state
	uint32 vecIndex = request->VecIndex();
	uint32 vecOffset = request->VecOffset();
	generic_size_t totalLength = min_c(request->RemainingBytes(),
		fRestrictions.max_transfer_size);

	if (maxOperationLength > 0
		&& maxOperationLength < totalLength + partialBegin) {
		totalLength = maxOperationLength - partialBegin;
	}

	MutexLocker locker(fLock);

	DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
	if (dmaBuffer == NULL)
		return B_BUSY;

	dmaBuffer->SetVecCount(0);

	generic_io_vec* vecs = NULL;
	uint32 segmentCount = 0;

	TRACE("  offset %" B_PRIdOFF ", remaining size: %lu, block size %lu "
		"-> partial: %lu\n", offset, request->RemainingBytes(), fBlockSize,
		partialBegin);

	if (buffer->IsVirtual()) {
		// Unless we need the bounce buffer anyway, we have to translate the
		// virtual addresses to physical addresses, so we can check the DMA
		// restrictions.
		TRACE("  buffer is virtual %s\n",
			buffer->IsUser() ? "user" : "kernel");
		// TODO: !partialOperation || totalLength >= fBlockSize
		// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
		if (true) {
			generic_size_t transferLeft = totalLength;
			vecs = fScratchVecs;

			TRACE("  create physical map (for %ld vecs)\n",
				buffer->VecCount());
			for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
				generic_io_vec& vec = buffer->VecAt(i);
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t size = vec.length - vecOffset;
				vecOffset = 0;
				if (size > transferLeft)
					size = transferLeft;

				while (size > 0 && segmentCount
						< fRestrictions.max_segment_count) {
					physical_entry entry;
					uint32 count = 1;
					get_memory_map_etc(request->TeamID(), (void*)base, size,
						&entry, &count);

					vecs[segmentCount].base = entry.address;
					vecs[segmentCount].length = entry.size;

					transferLeft -= entry.size;
					base += entry.size;
					size -= entry.size;
					segmentCount++;
				}

				if (transferLeft == 0)
					break;
			}

			totalLength -= transferLeft;
		}

		vecIndex = 0;
		vecOffset = 0;
	} else {
		// We already have physical addresses.
		vecs = buffer->Vecs();
		segmentCount = min_c(buffer->VecCount() - vecIndex,
			fRestrictions.max_segment_count);
	}

#ifdef TRACE_DMA_RESOURCE
	TRACE("  physical count %" B_PRIu32 "\n", segmentCount);
	for (uint32 i = 0; i < segmentCount; i++) {
		TRACE("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIxGENADDR "\n",
			i, vecs[vecIndex + i].base, vecs[vecIndex + i].length);
	}
#endif

	// check alignment, boundaries, etc. and set vecs in DMA buffer

	// Fetch a bounce buffer we can use for the DMABuffer.
	// TODO: We should do that lazily when needed!
	DMABounceBuffer* bounceBuffer = NULL;
	if (_NeedsBoundsBuffers()) {
		bounceBuffer = fBounceBuffers.Head();
		if (bounceBuffer == NULL) {
			// Put the DMA buffer back before bailing out, so it isn't lost.
			fDMABuffers.Add(dmaBuffer);
			return B_BUSY;
		}
	}
	dmaBuffer->SetBounceBuffer(bounceBuffer);

	generic_size_t dmaLength = 0;
	phys_addr_t physicalBounceBuffer
		= dmaBuffer->PhysicalBounceBufferAddress();
	phys_size_t bounceLeft = fBounceBufferSize;
	generic_size_t transferLeft = totalLength;

	// If the offset isn't block-aligned, use the bounce buffer to bridge the
	// gap to the start of the vec.
	if (partialBegin > 0) {
		generic_size_t length;
		if (request->IsWrite()) {
			// we always need to read in a whole block for the partial write
			length = fBlockSize;
		} else {
			length = (partialBegin + fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);
		}

		if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
				length, true) == 0) {
			TRACE("  adding partial begin failed, length %lu!\n", length);
			return B_BAD_VALUE;
		}

		dmaLength += length;

		generic_size_t transferred = length - partialBegin;
		vecOffset += transferred;
		offset -= partialBegin;

		if (transferLeft > transferred)
			transferLeft -= transferred;
		else
			transferLeft = 0;

		TRACE("  partial begin, using bounce buffer: offset: %" B_PRIdOFF ", "
			"length: %lu\n", offset, length);
	}

	for (uint32 i = vecIndex;
			i < vecIndex + segmentCount && transferLeft > 0;) {
		if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
			break;

		const generic_io_vec& vec = vecs[i];
		if (vec.length <= vecOffset) {
			vecOffset -= vec.length;
			i++;
			continue;
		}

		generic_addr_t base = vec.base + vecOffset;
		generic_size_t maxLength = vec.length - vecOffset;
		if (maxLength > transferLeft)
			maxLength = transferLeft;
		generic_size_t length = maxLength;

		// Cut the vec according to transfer size, segment size, and boundary.

		if (dmaLength + length > fRestrictions.max_transfer_size) {
			length = fRestrictions.max_transfer_size - dmaLength;
			TRACE("  vec %" B_PRIu32 ": restricting length to %lu due to "
				"transfer size limit\n", i, length);
		}
		_RestrictBoundaryAndSegmentSize(base, length);

		phys_size_t useBounceBufferSize = 0;

		// Check low address: use bounce buffer for range to low address.
		// Check alignment: if not aligned, use bounce buffer for complete vec.
		if (base < fRestrictions.low_address) {
			useBounceBufferSize = fRestrictions.low_address - base;
			TRACE("  vec %" B_PRIu32 ": below low address, using bounce "
				"buffer: %lu\n", i, useBounceBufferSize);
		} else if (base & (fRestrictions.alignment - 1)) {
			useBounceBufferSize = length;
			TRACE("  vec %" B_PRIu32 ": misalignment, using bounce buffer: "
				"%lu\n", i, useBounceBufferSize);
		}

		// Enforce high address restriction
		if (base > fRestrictions.high_address)
			useBounceBufferSize = length;
		else if (base + length > fRestrictions.high_address)
			length = fRestrictions.high_address - base;

		// Align length as well
		if (useBounceBufferSize == 0)
			length &= ~(fRestrictions.alignment - 1);

		// If length is 0, use bounce buffer for complete vec.
		if (length == 0) {
			length = maxLength;
			useBounceBufferSize = length;
			TRACE("  vec %" B_PRIu32 ": 0 length, using bounce buffer: %lu\n",
				i, useBounceBufferSize);
		}

		if (useBounceBufferSize > 0) {
			// alignment could still be wrong (we round up here)
			useBounceBufferSize = (useBounceBufferSize
				+ fRestrictions.alignment - 1) & ~(fRestrictions.alignment - 1);

			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, useBounceBufferSize, false);
			if (length == 0) {
				TRACE("  vec %" B_PRIu32 ": out of bounce buffer space\n", i);
				// We don't have any bounce buffer space left, we need to move
				// this request to the next I/O operation.
				break;
			}
			TRACE("  vec %" B_PRIu32 ": final bounce length: %lu\n", i,
				length);
		} else {
			TRACE("  vec %" B_PRIu32 ": final length restriction: %lu\n", i,
				length);
			dmaBuffer->AddVec(base, length);
		}

		dmaLength += length;
		vecOffset += length;
		transferLeft -= min_c(length, transferLeft);
	}

	// If we're writing partially, we always need to have a block-sized bounce
	// buffer (or else we would overwrite memory to be written on the read in
	// the first phase).
	off_t requestEnd = request->Offset() + request->Length();
	if (request->IsWrite()) {
		generic_size_t diff = dmaLength & (fBlockSize - 1);

		// If the transfer length is block aligned and we're writing past the
		// end of the given data, we still have to check whether the last vec
		// is a bounce buffer segment shorter than the block size. If so, we
		// have to cut back the complete block and use a bounce buffer for it
		// entirely.
		if (diff == 0 && offset + (off_t)dmaLength > requestEnd) {
			const generic_io_vec& dmaVec
				= dmaBuffer->VecAt(dmaBuffer->VecCount() - 1);
			ASSERT(dmaVec.base >= dmaBuffer->PhysicalBounceBufferAddress()
				&& dmaVec.base
					< dmaBuffer->PhysicalBounceBufferAddress()
						+ fBounceBufferSize);
				// We can be certain that the last vec is a bounce buffer vec,
				// since otherwise the DMA buffer couldn't exceed the end of
				// the request data.
			if (dmaVec.length < fBlockSize)
				diff = fBlockSize;
		}

		if (diff != 0) {
			// Not yet block aligned -- cut back to the previous block and add
			// a block-sized bounce buffer segment.
			TRACE("  partial end write: %lu, diff %lu\n", dmaLength, diff);

			_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
			dmaLength -= diff;

			if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
					bounceLeft, fBlockSize, true) == 0) {
				// If we cannot write anything, we can't process the request at
				// all.
				TRACE("  adding bounce buffer failed!!!\n");
				if (dmaLength == 0)
					return B_BAD_VALUE;
			} else
				dmaLength += fBlockSize;
		}
	}

	// If total length not block aligned, use bounce buffer for padding (read
	// case only).
	while ((dmaLength & (fBlockSize - 1)) != 0) {
		TRACE("  dmaLength not block aligned: %lu\n", dmaLength);
		generic_size_t length
			= (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);

		// If total length > max transfer size, segment count > max segment
		// count, truncate.
		// TODO: sometimes we can replace the last vec with the bounce buffer
		// to let it match the restrictions.
		if (length > fRestrictions.max_transfer_size
			|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
			|| bounceLeft < length - dmaLength) {
			// cut back the unaligned part of the DMA length
			TRACE("  can't align length due to max transfer size, segment "
				"count restrictions, or lacking bounce buffer space\n");
			generic_size_t toCut = dmaLength
				& (max_c(fBlockSize, fRestrictions.alignment) - 1);
			dmaLength -= toCut;
			if (dmaLength == 0) {
				// This can only happen when we have too many small segments
				// and hit the max segment count. In this case we just use the
				// bounce buffer for as much as possible of the total length.
				dmaBuffer->SetVecCount(0);
				generic_addr_t base = dmaBuffer->PhysicalBounceBufferAddress();
				dmaLength = min_c(totalLength, fBounceBufferSize)
					& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
				_RestrictBoundaryAndSegmentSize(base, dmaLength);
				dmaBuffer->AddVec(base, dmaLength);

				physicalBounceBuffer = base + dmaLength;
				bounceLeft = fBounceBufferSize - dmaLength;
			} else {
				_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
					toCut);
			}
		} else {
			TRACE("  adding %lu bytes final bounce buffer\n",
				length - dmaLength);
			length -= dmaLength;
			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, length, true);
			if (length == 0)
				panic("don't do this to me!");
			dmaLength += length;
		}
	}

	operation->SetBuffer(dmaBuffer);
	operation->SetBlockSize(fBlockSize);
	operation->SetOriginalRange(originalOffset,
		min_c(offset + (off_t)dmaLength, requestEnd) - originalOffset);
	operation->SetRange(offset, dmaLength);
	operation->SetPartial(partialBegin != 0,
		offset + (off_t)dmaLength > requestEnd);

	// If we don't need the bounce buffer, we put it back, otherwise we take
	// it out of the pool for the duration of the operation.
	operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
	if (operation->UsesBounceBuffer())
		fBounceBuffers.RemoveHead();
	else
		dmaBuffer->SetBounceBuffer(NULL);

	status_t error = operation->Prepare(request);
	if (error != B_OK)
		return error;

	request->Advance(operation->OriginalLength());

	return B_OK;
}


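/*!	Returns \a buffer to the pool of free DMA buffers and reclaims its bounce
	buffer, if it still has one attached.
*/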
void
DMAResource::RecycleBuffer(DMABuffer* buffer)
{
	if (buffer == NULL)
		return;

	MutexLocker _(fLock);
	fDMABuffers.Add(buffer);
	if (buffer->BounceBuffer() != NULL) {
		fBounceBuffers.Add(buffer->BounceBuffer());
		buffer->SetBounceBuffer(NULL);
	}
}


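/*!	Returns whether bounce buffers may be needed at all, i.e. whether any
	restriction exists that arbitrary I/O memory could violate.
*/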
bool
DMAResource::_NeedsBoundsBuffers() const
{
	return fRestrictions.alignment > 1
		|| fRestrictions.low_address != 0
		|| fRestrictions.high_address != ~(generic_addr_t)0
		|| fBlockSize > 1;
}


#if 0


status_t
create_dma_resource(restrictions)
{
	// Restrictions are: transfer size, address space, alignment,
	// segment min/max size, num segments
}


void
delete_dma_resource(resource)
{
}


dma_buffer_alloc(resource, size)
{
}


dma_buffer_free(buffer)
{
	// Allocates or frees memory in that DMA buffer.
}

#endif	// 0