/*
 * Copyright 2019-2022, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 */

extern "C" {
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
#include <vm/vm_extern.h>
}

#include <vm/vm_page.h>


// #pragma mark - structures


struct bus_dma_tag {
	bus_dma_tag_t parent;
	int32 ref_count;
	int32 map_count;

	int flags;
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS1

	phys_size_t alignment;
	phys_addr_t boundary;
	phys_addr_t lowaddr;
	phys_addr_t highaddr;

	phys_size_t maxsize;
	uint32 maxsegments;
	phys_size_t maxsegsz;
};

struct bus_dmamap {
	bus_dma_tag_t dmat;

	bus_dma_segment_t* segments;
	int nsegs;

	void* bounce_buffer;
	bus_size_t bounce_buffer_size;

	enum {
		BUFFER_NONE = 0,
		BUFFER_PROHIBITED,

		BUFFER_TYPE_SIMPLE,
		BUFFER_TYPE_MBUF,
	} buffer_type;
	union {
		struct {
			void* buffer;
			bus_size_t buffer_length;
		};
		struct mbuf* mbuf;
	};
};


// #pragma mark - functions


extern "C" void
busdma_lock_mutex(void* arg, bus_dma_lock_op_t op)
{
	struct mtx* dmtx = (struct mtx*)arg;
	switch (op) {
		case BUS_DMA_LOCK:
			mtx_lock(dmtx);
			break;
		case BUS_DMA_UNLOCK:
			mtx_unlock(dmtx);
			break;
		default:
			panic("busdma_lock_mutex: unknown operation 0x%x", op);
	}
}
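
// In FreeBSD, busdma_lock_mutex is the stock "lockfunc": a driver whose load
// callback may be deferred passes it to bus_dma_tag_create() together with
// one of its own mutexes as "lockfuncarg". A hypothetical sketch ("sc" and
// its fields are assumptions, not part of this file):
//
//	bus_dma_tag_create(..., busdma_lock_mutex, &sc->sc_mtx, &sc->dma_tag);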


extern "C" int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary,
	bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t* filter,
	void* filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
	int flags, bus_dma_lock_t* lockfunc, void* lockfuncarg, bus_dma_tag_t* dmat)
{
	if (maxsegsz == 0)
		return EINVAL;
	if (filter != NULL) {
		panic("bus_dma_tag_create: error: filters not supported!");
		return EOPNOTSUPP;
	}

	bus_dma_tag_t newtag = (bus_dma_tag_t)kernel_malloc(sizeof(*newtag),
		M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL)
		return ENOMEM;

	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = lowaddr;
	newtag->highaddr = highaddr;
	newtag->maxsize = maxsize;
	newtag->maxsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;
	newtag->map_count = 0;

	// lockfunc is only needed if callbacks will be invoked asynchronously.

	if (parent != NULL) {
		newtag->parent = parent;
		atomic_add(&parent->ref_count, 1);

		// Inherit the parent's restrictions: the effective constraints are
		// the most restrictive combination of the parent's and our own.
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary, newtag->boundary);
		}
	}

	if (newtag->lowaddr < vm_page_max_address())
		newtag->flags |= BUS_DMA_COULD_BOUNCE;
	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	*dmat = newtag;
	return 0;
}
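
// A minimal sketch of typical usage (hypothetical values; a real driver
// derives them from its hardware's DMA constraints). This would create a
// tag for one 64 KB segment of 32-bit-addressable memory:
//
//	bus_dma_tag_t tag;
//	bus_dma_tag_create(NULL,	/* parent */
//		1, 0,			/* alignment, boundary */
//		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,	/* lowaddr, highaddr */
//		NULL, NULL,		/* filter, filterarg (unsupported here) */
//		65536, 1, 65536,	/* maxsize, nsegments, maxsegsz */
//		0, NULL, NULL,		/* flags, lockfunc, lockfuncarg */
//		&tag);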


extern "C" int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return 0;
	if (dmat->map_count != 0)
		return EBUSY;

	while (dmat != NULL) {
		bus_dma_tag_t parent = dmat->parent;
		if (atomic_add(&dmat->ref_count, -1) == 1) {
			kernel_free(dmat, M_DEVBUF);

			// Last reference released, so release our reference on our parent.
			dmat = parent;
		} else
			dmat = NULL;
	}
	return 0;
}


extern "C" int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t* mapp)
{
	*mapp = (bus_dmamap_t)calloc(1, sizeof(**mapp));
	if (*mapp == NULL)
		return ENOMEM;

	(*mapp)->dmat = dmat;
	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t*)calloc(dmat->maxsegments,
		sizeof(bus_dma_segment_t));
	if ((*mapp)->segments == NULL) {
		free(*mapp);
		*mapp = NULL;
		return ENOMEM;
	}

	atomic_add(&dmat->map_count, 1);
	return 0;
}


extern "C" int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return 0;
	if (map->buffer_type > bus_dmamap::BUFFER_PROHIBITED)
		return EBUSY;

	atomic_add(&map->dmat->map_count, -1);
	kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, M_DEVBUF);
	free(map->segments);
	free(map);
	return 0;
}


static int
_allocate_dmamem(bus_dma_tag_t dmat, phys_size_t size, void** vaddr, int flags)
{
	int mflags;
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	// FreeBSD uses standard malloc() for the case where size <= PAGE_SIZE,
	// but we want to keep DMA'd memory a bit more separate, so we always use
	// contigmalloc.

	// The range specified by lowaddr, highaddr is an *exclusion* range,
	// not an inclusion range. So we want to at least start with the low end,
	// if possible. (The most common exclusion range is 32-bit only, and
	// ones other than that are very rare, so typically this will succeed.)
	*vaddr = NULL;
	if (dmat->lowaddr > B_PAGE_SIZE) {
		*vaddr = kernel_contigmalloc(size, M_DEVBUF, mflags,
			0, dmat->lowaddr,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
		if (*vaddr == NULL)
			dprintf("bus_dmamem_alloc: failed to allocate with lowaddr "
				"0x%" B_PRIxPHYSADDR "\n", dmat->lowaddr);
	}
	if (*vaddr == NULL && dmat->highaddr < BUS_SPACE_MAXADDR) {
		*vaddr = kernel_contigmalloc(size, M_DEVBUF, mflags,
			dmat->highaddr, BUS_SPACE_MAXADDR,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
	}

	if (*vaddr == NULL) {
		dprintf("bus_dmamem_alloc: failed to allocate for tag (size %d, "
			"low 0x%" B_PRIxPHYSADDR ", high 0x%" B_PRIxPHYSADDR ", "
			"boundary 0x%" B_PRIxPHYSADDR ")\n",
			(int)size, dmat->lowaddr, dmat->highaddr, dmat->boundary);
		return ENOMEM;
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		dprintf("bus_dmamem_alloc: failed to align memory: wanted 0x%"
			B_PRIxPHYSADDR ", got 0x%" B_PRIxPHYSADDR "\n",
			dmat->alignment, vtophys(*vaddr));
		bus_dmamem_free_tagless(*vaddr, size);
		return ENOMEM;
	}

	return 0;
}


extern "C" int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
	bus_dmamap_t* mapp)
{
	// FreeBSD does not permit the "mapp" argument to be NULL, but we do
	// (primarily for the OpenBSD shims.)
	if (mapp != NULL) {
		bus_dmamap_create(dmat, flags, mapp);

		// Drivers assume dmamem will never be bounced, so ensure that.
		(*mapp)->buffer_type = bus_dmamap::BUFFER_PROHIBITED;
	}

	int status = _allocate_dmamem(dmat, dmat->maxsize, vaddr, flags);
	if (status != 0 && mapp != NULL)
		bus_dmamap_destroy(dmat, *mapp);
	return status;
}


extern "C" void
bus_dmamem_free_tagless(void* vaddr, size_t size)
{
	kernel_contigfree(vaddr, size, M_DEVBUF);
}


extern "C" void
bus_dmamem_free(bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_tagless(vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
}


static int
_prepare_bounce_buffer(bus_dmamap_t map, bus_size_t reqsize, int flags)
{
	if (map->buffer_type == bus_dmamap::BUFFER_PROHIBITED) {
		panic("cannot bounce, direct DMA only!");
		return B_NOT_ALLOWED;
	}
	if (map->buffer_type != bus_dmamap::BUFFER_NONE) {
		panic("bounce buffer already in use! (type %d)", map->buffer_type);
		return EBUSY;
	}

	if (map->bounce_buffer_size >= reqsize)
		return 0;

	if (map->bounce_buffer != NULL) {
		kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, 0);
		map->bounce_buffer = NULL;
		map->bounce_buffer_size = 0;
	}

	// The contiguous allocator will round up anyway, so we might as well
	// do it first so that we know how large our buffer really is.
	reqsize = ROUNDUP(reqsize, B_PAGE_SIZE);

	int error = _allocate_dmamem(map->dmat, reqsize, &map->bounce_buffer, flags);
	if (error != 0)
		return error;
	map->bounce_buffer_size = reqsize;

	return 0;
}


static bool
_validate_address(bus_dma_tag_t dmat, bus_addr_t paddr, bool validate_alignment = true)
{
	// The (lowaddr, highaddr] range is an exclusion range: an address that
	// falls inside it cannot be used for DMA with this tag.
	if (paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		return false;
	if (validate_alignment && !vm_addr_align_ok(paddr, dmat->alignment))
		return false;

	return true;
}


static int
_bus_load_buffer(bus_dma_tag_t dmat, void* buf, bus_size_t buflen,
	int flags, bus_addr_t& last_phys_addr, bus_dma_segment_t* segs,
	int& seg, bool first)
{
	vm_offset_t virtual_addr = (vm_offset_t)buf;
	const bus_addr_t boundary_mask = ~(dmat->boundary - 1);

	while (buflen > 0) {
		const bus_addr_t phys_addr = pmap_kextract(virtual_addr);

		bus_size_t segment_size = PAGE_SIZE - (phys_addr & PAGE_MASK);
		if (segment_size > buflen)
			segment_size = buflen;
		if (segment_size > dmat->maxsegsz)
			segment_size = dmat->maxsegsz;

		if (dmat->boundary > 0) {
			// Make sure we don't cross a boundary.
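			// For example, with a 64 KB boundary (0x10000) and
			// phys_addr == 0x1F000: boundary_addr == (0x1F000 + 0x10000)
			// & ~0xFFFF == 0x20000, so the segment is clamped to
			// 0x20000 - 0x1F000 == 0x1000 bytes.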
			bus_addr_t boundary_addr = (phys_addr + dmat->boundary)
				& boundary_mask;
			if (segment_size > (boundary_addr - phys_addr))
				segment_size = (boundary_addr - phys_addr);
		}

		// If possible, coalesce into the previous segment.
		if (!first && phys_addr == last_phys_addr
				&& (segs[seg].ds_len + segment_size) <= dmat->maxsegsz
				&& (dmat->boundary == 0
					|| (segs[seg].ds_addr & boundary_mask)
						== (phys_addr & boundary_mask))) {
			if (!_validate_address(dmat, phys_addr, false))
				return ERANGE;

			segs[seg].ds_len += segment_size;
		} else {
			if (first)
				first = false;
			else if (++seg >= dmat->maxsegments)
				break;

			if (!_validate_address(dmat, phys_addr))
				return ERANGE;

			segs[seg].ds_addr = phys_addr;
			segs[seg].ds_len = segment_size;
		}

		last_phys_addr = phys_addr + segment_size;
		virtual_addr += segment_size;
		buflen -= segment_size;
	}

	return (buflen != 0 ? EFBIG : 0);
}


extern "C" int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void* buf,
	bus_size_t buflen, bus_dmamap_callback_t* callback,
	void* callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, seg = 0;

	if (buflen > dmat->maxsize)
		return EINVAL;

	error = _bus_load_buffer(dmat, buf, buflen, flags,
		lastaddr, map->segments, seg, true);

	if (error != 0) {
		// The buffer could not be mapped directly; try again using a
		// bounce buffer.
		error = _prepare_bounce_buffer(map, buflen, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_SIMPLE;
		map->buffer = buf;
		map->buffer_length = buflen;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, buflen, flags,
			lastaddr, map->segments, seg, true);
	}

	if (error)
		(*callback)(callback_arg, map->segments, 0, error);
	else
		(*callback)(callback_arg, map->segments, seg + 1, 0);

	// Only ENOMEM is returned to the caller; all other errors are reported
	// through the callback alone.
	if (error == ENOMEM)
		return error;
	return 0;
}
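
// Note: when a load has fallen back to the bounce buffer, the device is
// handed the bounce pages, not the caller's buffer. The data itself is only
// copied in bus_dmamap_sync(): BUS_DMASYNC_PREWRITE copies into the bounce
// buffer and BUS_DMASYNC_POSTREAD copies back out, so drivers must issue
// the usual sync operations around each transfer.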


extern "C" int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dma_segment_t* segs, int* _nsegs, int flags)
{
	M_ASSERTPKTHDR(mb);

	if (mb->m_pkthdr.len > dmat->maxsize)
		return EINVAL;

	int seg = 0, error = 0;
	bool first = true;
	bus_addr_t lastaddr = 0;
	flags |= BUS_DMA_NOWAIT;

	for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len <= 0)
			continue;

		error = _bus_load_buffer(dmat, m->m_data, m->m_len,
			flags, lastaddr, segs, seg, first);
		first = false;
	}

	if (error != 0) {
		// The mbuf chain could not be mapped directly; try again using a
		// bounce buffer.
		error = _prepare_bounce_buffer(map, mb->m_pkthdr.len, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_MBUF;
		map->mbuf = mb;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, mb->m_pkthdr.len,
			flags, lastaddr, segs, seg, true);
	}

	*_nsegs = seg + 1;
	return error;
}


extern "C" int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dmamap_callback2_t* callback, void* callback_arg, int flags)
{
	int nsegs, error;
	error = bus_dmamap_load_mbuf_sg(dmat, map, mb, map->segments, &nsegs, flags);

	if (error) {
		(*callback)(callback_arg, map->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, map->segments, nsegs, mb->m_pkthdr.len,
			error);
	}
	return error;
}


extern "C" void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	if (map->buffer_type != bus_dmamap::BUFFER_PROHIBITED)
		map->buffer_type = bus_dmamap::BUFFER_NONE;
	map->buffer = NULL;
}


extern "C" void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	bus_size_t length = 0;
	switch (map->buffer_type) {
		case bus_dmamap::BUFFER_NONE:
		case bus_dmamap::BUFFER_PROHIBITED:
			// Nothing to do.
			return;

		case bus_dmamap::BUFFER_TYPE_SIMPLE:
			length = map->buffer_length;
			break;

		case bus_dmamap::BUFFER_TYPE_MBUF:
			length = map->mbuf->m_pkthdr.len;
			break;

		default:
			panic("unknown buffer type");
	}

	bus_dmamap_sync_etc(dmat, map, 0, length, op);
}


extern "C" void
bus_dmamap_sync_etc(bus_dma_tag_t dmat, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t length, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		// "Pre-write": after the CPU writes, before the device reads.
		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length,
					("mis-sized sync"));
				memcpy((caddr_t)map->bounce_buffer + offset,
					(caddr_t)map->buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copydata(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}

		memory_write_barrier();
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		// "Post-read": after the device writes, before the CPU reads.
		memory_read_barrier();

		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length,
					("mis-sized sync"));
				memcpy((caddr_t)map->buffer + offset,
					(caddr_t)map->bounce_buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copyback(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}
	}
}
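
/*
 * Typical driver lifecycle (a hypothetical sketch; "sc", "buf", "len", and
 * "my_callback" are assumptions for illustration, not part of this file):
 *
 *	bus_dmamap_t map;
 *	bus_dmamap_create(sc->dma_tag, 0, &map);
 *	bus_dmamap_load(sc->dma_tag, map, buf, len, my_callback, sc,
 *		BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_PREWRITE);
 *	// ... device performs DMA to/from the buffer ...
 *	bus_dmamap_sync(sc->dma_tag, map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->dma_tag, map);
 *	bus_dmamap_destroy(sc->dma_tag, map);
 */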