/*
 * Copyright 2019-2022, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 */

extern "C" {
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
#include <vm/vm_extern.h>

phys_addr_t vm_page_max_address();
	// declared in <vm/vm_page.h> which we can't include here.
}


// #pragma mark - structures


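// A DMA tag records the constraints a device places on its transfers:
// the addressable physical range, alignment, boundary, and segment limits.
// Tags form a hierarchy; a child tag never relaxes its parent's constraints.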
struct bus_dma_tag {
	bus_dma_tag_t	parent;
	int32			ref_count;
	int32			map_count;

	int				flags;
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS1

	phys_size_t		alignment;
	phys_addr_t		boundary;
	phys_addr_t		lowaddr;
	phys_addr_t		highaddr;

	phys_size_t		maxsize;
	uint32			maxsegments;
	phys_size_t		maxsegsz;
};

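// A map tracks one in-flight load: the computed segment list and, when the
// driver's buffer violates the tag's constraints, the bounce buffer that is
// used in its place.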
struct bus_dmamap {
	bus_dma_tag_t		dmat;

	bus_dma_segment_t*	segments;
	int					nsegs;

	void*		bounce_buffer;
	bus_size_t	bounce_buffer_size;

	enum {
		BUFFER_NONE = 0,
		BUFFER_PROHIBITED,

		BUFFER_TYPE_SIMPLE,
		BUFFER_TYPE_MBUF,
	} buffer_type;
	union {
		struct {
			void*		buffer;
			bus_size_t	buffer_length;
		};
		struct mbuf*	mbuf;
	};
};


// #pragma mark - functions


extern "C" void
busdma_lock_mutex(void* arg, bus_dma_lock_op_t op)
{
	struct mtx* dmtx = (struct mtx*)arg;
	switch (op) {
		case BUS_DMA_LOCK:
			mtx_lock(dmtx);
			break;
		case BUS_DMA_UNLOCK:
			mtx_unlock(dmtx);
			break;
		default:
			panic("busdma_lock_mutex: unknown operation 0x%x", op);
	}
}


extern "C" int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary,
	bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t* filter,
	void* filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
	int flags, bus_dma_lock_t* lockfunc, void* lockfuncarg, bus_dma_tag_t* dmat)
{
	if (maxsegsz == 0)
		return EINVAL;
	if (filter != NULL) {
		panic("bus_dma_tag_create: error: filters not supported!");
		return EOPNOTSUPP;
	}

	bus_dma_tag_t newtag = (bus_dma_tag_t)kernel_malloc(sizeof(*newtag),
		M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL)
		return ENOMEM;

	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = lowaddr;
	newtag->highaddr = highaddr;
	newtag->maxsize = maxsize;
	newtag->maxsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;
	newtag->map_count = 0;

	// lockfunc is only needed if callbacks will be invoked asynchronously.

	if (parent != NULL) {
		newtag->parent = parent;
		atomic_add(&parent->ref_count, 1);

		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary, newtag->boundary);
		}
	}

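	// Bounce buffers may be needed if the tag excludes part of physical
	// memory, or requires alignment stricter than page allocation provides.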
	if (newtag->lowaddr < vm_page_max_address())
		newtag->flags |= BUS_DMA_COULD_BOUNCE;
	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	*dmat = newtag;
	return 0;
}


extern "C" int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return 0;
	if (dmat->map_count != 0)
		return EBUSY;

	while (dmat != NULL) {
		bus_dma_tag_t parent = dmat->parent;

		// atomic_add() returns the previous value; re-reading ref_count
		// after the decrement would race with concurrent releases.
		if (atomic_add(&dmat->ref_count, -1) == 1) {
			kernel_free(dmat, M_DEVBUF);

			// Last reference released, so release our reference on our parent.
			dmat = parent;
		} else
			dmat = NULL;
	}
	return 0;
}


extern "C" int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t* mapp)
{
	*mapp = (bus_dmamap_t)calloc(1, sizeof(**mapp));
	if (*mapp == NULL)
		return ENOMEM;

	(*mapp)->dmat = dmat;
	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t*)calloc(dmat->maxsegments,
		sizeof(bus_dma_segment_t));
	if ((*mapp)->segments == NULL) {
		free(*mapp);
		*mapp = NULL;
		return ENOMEM;
	}

	atomic_add(&dmat->map_count, 1);
	return 0;
}


extern "C" int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return 0;
	if (map->buffer_type > bus_dmamap::BUFFER_PROHIBITED)
		return EBUSY;

	atomic_add(&map->dmat->map_count, -1);
	kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, M_DEVBUF);
	free(map->segments);
	free(map);
	return 0;
}

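// Allocates physically contiguous memory that honors the tag's address,
// alignment, and boundary constraints.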
static int
_allocate_dmamem(bus_dma_tag_t dmat, phys_size_t size, void** vaddr, int flags)
{
	int mflags;
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	// The caller's pointer may be uninitialized; the checks below rely on NULL.
	*vaddr = NULL;

	// FreeBSD uses standard malloc() for the case where size <= PAGE_SIZE,
	// but we want to keep DMA'd memory a bit more separate, so we always use
	// contigmalloc.

	// The range specified by lowaddr, highaddr is an *exclusion* range,
	// not an inclusion range. So we want to at least start with the low end,
	// if possible. (The most common exclusion range is 32-bit only, and
	// ones other than that are very rare, so typically this will succeed.)
	if (dmat->lowaddr > B_PAGE_SIZE) {
		*vaddr = kernel_contigmalloc(size, M_DEVBUF, mflags,
			0, dmat->lowaddr,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
		if (*vaddr == NULL)
			dprintf("bus_dmamem_alloc: failed to allocate with lowaddr "
				"0x%" B_PRIxPHYSADDR "\n", dmat->lowaddr);
	}
	if (*vaddr == NULL && dmat->highaddr < BUS_SPACE_MAXADDR) {
		*vaddr = kernel_contigmalloc(size, M_DEVBUF, mflags,
			dmat->highaddr, BUS_SPACE_MAXADDR,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
	}

	if (*vaddr == NULL) {
		dprintf("bus_dmamem_alloc: failed to allocate for tag (size %d, "
			"low 0x%" B_PRIxPHYSADDR ", high 0x%" B_PRIxPHYSADDR ", "
			"boundary 0x%" B_PRIxPHYSADDR ")\n",
			(int)size, dmat->lowaddr, dmat->highaddr, dmat->boundary);
		return ENOMEM;
	} else if (dmat->alignment > 1
			&& (vtophys(*vaddr) & (dmat->alignment - 1)) != 0) {
		dprintf("bus_dmamem_alloc: failed to align memory: wanted 0x%"
			B_PRIxPHYSADDR ", got 0x%" B_PRIxPHYSADDR "\n",
			dmat->alignment, vtophys(*vaddr));
		bus_dmamem_free_tagless(*vaddr, size);
		return ENOMEM;
	}

	return 0;
}


extern "C" int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
	bus_dmamap_t* mapp)
{
	// FreeBSD does not permit the "mapp" argument to be NULL, but we do
	// (primarily for the OpenBSD shims.)
	if (mapp != NULL) {
		int status = bus_dmamap_create(dmat, flags, mapp);
		if (status != 0)
			return status;

		// Drivers assume dmamem will never be bounced, so ensure that.
		(*mapp)->buffer_type = bus_dmamap::BUFFER_PROHIBITED;
	}

	int status = _allocate_dmamem(dmat, dmat->maxsize, vaddr, flags);
	if (status != 0 && mapp != NULL)
		bus_dmamap_destroy(dmat, *mapp);
	return status;
}


extern "C" void
bus_dmamem_free_tagless(void* vaddr, size_t size)
{
	kernel_contigfree(vaddr, size, M_DEVBUF);
}


extern "C" void
bus_dmamem_free(bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_tagless(vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
}


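// Ensures the map has a bounce buffer of at least reqsize bytes, allocating
// or replacing the existing one as needed.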
static int
_prepare_bounce_buffer(bus_dmamap_t map, bus_size_t reqsize, int flags)
{
	if (map->buffer_type == bus_dmamap::BUFFER_PROHIBITED) {
		panic("cannot bounce, direct DMA only!");
		return B_NOT_ALLOWED;
	}
	if (map->buffer_type != bus_dmamap::BUFFER_NONE) {
		panic("bounce buffer already in use! (type %d)", map->buffer_type);
		return EBUSY;
	}

	if (map->bounce_buffer_size >= reqsize)
		return 0;

	if (map->bounce_buffer != NULL) {
		kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size,
			M_DEVBUF);
		map->bounce_buffer = NULL;
		map->bounce_buffer_size = 0;
	}

	// The contiguous allocator will round up anyway, so we might as well
	// do it first so that we know how large our buffer really is.
	reqsize = roundup(reqsize, B_PAGE_SIZE);

	int error = _allocate_dmamem(map->dmat, reqsize, &map->bounce_buffer, flags);
	if (error != 0)
		return error;
	map->bounce_buffer_size = reqsize;

	return 0;
}


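// Checks that a physical address is usable by the tag: (lowaddr, highaddr]
// is an exclusion window, and the address must satisfy the tag's alignment
// unless the caller is extending an already-validated segment.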
static bool
_validate_address(bus_dma_tag_t dmat, bus_addr_t paddr, bool validate_alignment = true)
{
	if (paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		return false;
	if (validate_alignment && !vm_addr_align_ok(paddr, dmat->alignment))
		return false;

	return true;
}


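// Walks a virtual buffer page by page, translating each page to its physical
// address and appending to the segment array. Segments are split at page,
// maxsegsz, and boundary limits, and coalesced when physically contiguous.
// Returns ERANGE if an address violates the tag, or EFBIG if the buffer does
// not fit within the tag's segment limit.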
static int
_bus_load_buffer(bus_dma_tag_t dmat, void* buf, bus_size_t buflen,
	int flags, bus_addr_t& last_phys_addr, bus_dma_segment_t* segs,
	int& seg, bool first)
{
	vm_offset_t virtual_addr = (vm_offset_t)buf;
	const bus_addr_t boundary_mask = ~(dmat->boundary - 1);

	while (buflen > 0) {
		const bus_addr_t phys_addr = pmap_kextract(virtual_addr);

		bus_size_t segment_size = PAGE_SIZE - (phys_addr & PAGE_MASK);
		if (segment_size > buflen)
			segment_size = buflen;
		if (segment_size > dmat->maxsegsz)
			segment_size = dmat->maxsegsz;

		if (dmat->boundary > 0) {
			// Make sure we don't cross a boundary.
			bus_addr_t boundary_addr = (phys_addr + dmat->boundary) & boundary_mask;
			if (segment_size > (boundary_addr - phys_addr))
				segment_size = (boundary_addr - phys_addr);
		}

		// If possible, coalesce into the previous segment.
		if (!first && phys_addr == last_phys_addr
				&& (segs[seg].ds_len + segment_size) <= dmat->maxsegsz
				&& (dmat->boundary == 0
					|| (segs[seg].ds_addr & boundary_mask)
						== (phys_addr & boundary_mask))) {
			if (!_validate_address(dmat, phys_addr, false))
				return ERANGE;

			segs[seg].ds_len += segment_size;
		} else {
			if (first)
				first = false;
			else if (++seg >= dmat->maxsegments)
				break;

			if (!_validate_address(dmat, phys_addr))
				return ERANGE;

			segs[seg].ds_addr = phys_addr;
			segs[seg].ds_len = segment_size;
		}

		last_phys_addr = phys_addr + segment_size;
		virtual_addr += segment_size;
		buflen -= segment_size;
	}

	return (buflen != 0 ? EFBIG : 0);
}


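// Loads a buffer for DMA, retrying through a single contiguous bounce buffer
// when the buffer violates the tag's constraints; the data itself is copied
// to and from the bounce buffer in bus_dmamap_sync().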
extern "C" int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void* buf,
	bus_size_t buflen, bus_dmamap_callback_t* callback,
	void* callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, seg = 0;

	if (buflen > dmat->maxsize)
		return EINVAL;

	error = _bus_load_buffer(dmat, buf, buflen, flags,
		lastaddr, map->segments, seg, true);

	if (error != 0) {
		// Try again using a bounce buffer.
		error = _prepare_bounce_buffer(map, buflen, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_SIMPLE;
		map->buffer = buf;
		map->buffer_length = buflen;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, buflen, flags,
			lastaddr, map->segments, seg, true);
	}

	if (error)
		(*callback)(callback_arg, map->segments, 0, error);
	else
		(*callback)(callback_arg, map->segments, seg + 1, 0);

	// Only ENOMEM is returned to the caller; all other errors are reported
	// solely through the callback.
	if (error == ENOMEM)
		return error;
	return 0;
}


extern "C" int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dma_segment_t* segs, int* _nsegs, int flags)
{
	M_ASSERTPKTHDR(mb);

	if (mb->m_pkthdr.len > dmat->maxsize)
		return EINVAL;

	int seg = 0, error = 0;
	bool first = true;
	bus_addr_t lastaddr = 0;
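	// mbuf loads typically run in contexts that cannot sleep, so never wait
	// for memory.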
	flags |= BUS_DMA_NOWAIT;

	for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len <= 0)
			continue;

		error = _bus_load_buffer(dmat, m->m_data, m->m_len,
			flags, lastaddr, segs, seg, first);
		first = false;
	}

	if (error != 0) {
		// Try again using a bounce buffer.
		error = _prepare_bounce_buffer(map, mb->m_pkthdr.len, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_MBUF;
		map->mbuf = mb;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, mb->m_pkthdr.len,
			flags, lastaddr, segs, seg, true);
	}

	*_nsegs = seg + 1;
	return error;
}


extern "C" int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dmamap_callback2_t* callback, void* callback_arg, int flags)
{
	int nsegs, error;
	error = bus_dmamap_load_mbuf_sg(dmat, map, mb, map->segments, &nsegs, flags);

	if (error) {
		(*callback)(callback_arg, map->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, map->segments, nsegs, mb->m_pkthdr.len,
			error);
	}
	return error;
}


extern "C" void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	if (map->buffer_type != bus_dmamap::BUFFER_PROHIBITED)
		map->buffer_type = bus_dmamap::BUFFER_NONE;
	map->buffer = NULL;
}


extern "C" void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	bus_size_t length = 0;
	switch (map->buffer_type) {
		case bus_dmamap::BUFFER_NONE:
		case bus_dmamap::BUFFER_PROHIBITED:
			// Nothing to do.
			return;

		case bus_dmamap::BUFFER_TYPE_SIMPLE:
			length = map->buffer_length;
			break;

		case bus_dmamap::BUFFER_TYPE_MBUF:
			length = map->mbuf->m_pkthdr.len;
			break;

		default:
			panic("unknown buffer type");
	}

	bus_dmamap_sync_etc(dmat, map, 0, length, op);
}


extern "C" void
bus_dmamap_sync_etc(bus_dma_tag_t dmat, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t length, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

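	// Only bounced maps carry data to copy; PREREAD and POSTWRITE need no
	// copying for bounce buffers, so only PREWRITE and POSTREAD are handled.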
	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		// "Pre-write": after CPU writes, before device reads.
		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length, ("mis-sized sync"));
				memcpy((caddr_t)map->bounce_buffer + offset,
					(caddr_t)map->buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copydata(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}

		memory_write_barrier();
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		// "Post-read": after device writes, before CPU reads.
		memory_read_barrier();

		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length, ("mis-sized sync"));
				memcpy((caddr_t)map->buffer + offset,
					(caddr_t)map->bounce_buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copyback(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}
	}
}