/*
 * Copyright 2019-2022, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 */

extern "C" {
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
#include <vm/vm_extern.h>
}

#include <vm/vm_page.h>


// #pragma mark - structures


struct bus_dma_tag {
	bus_dma_tag_t	parent;
	int32			ref_count;
	int32			map_count;

	int				flags;
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS1

	phys_size_t		alignment;
	phys_addr_t		boundary;
	phys_addr_t		lowaddr;
	phys_addr_t		highaddr;

	phys_size_t		maxsize;
	uint32			maxsegments;
	phys_size_t		maxsegsz;
};

struct bus_dmamap {
	bus_dma_tag_t		dmat;

	bus_dma_segment_t*	segments;
	int					nsegs;

	void*		bounce_buffer;
	bus_size_t	bounce_buffer_size;

	enum {
		BUFFER_NONE = 0,
		BUFFER_PROHIBITED,

		BUFFER_TYPE_SIMPLE,
		BUFFER_TYPE_MBUF,
	} buffer_type;
	union {
		struct {
			void*				buffer;
			bus_size_t			buffer_length;
		};
		struct mbuf*		mbuf;
	};
};


// #pragma mark - functions


extern "C" void
busdma_lock_mutex(void* arg, bus_dma_lock_op_t op)
{
	struct mtx* dmtx = (struct mtx*)arg;
	switch (op) {
		case BUS_DMA_LOCK:
			mtx_lock(dmtx);
			break;
		case BUS_DMA_UNLOCK:
			mtx_unlock(dmtx);
			break;
		default:
			panic("busdma_lock_mutex: unknown operation 0x%x", op);
	}
}
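

/*
 * Illustration: FreeBSD drivers that expect deferred load callbacks pass
 * busdma_lock_mutex together with a driver mutex when creating their tag.
 * The softc names below ("sc->sc_mtx", "sc->sc_tag") are hypothetical:
 *
 *	bus_dma_tag_create(NULL, 1, 0,
 *		BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *		MCLBYTES, 1, MCLBYTES, 0,
 *		busdma_lock_mutex, &sc->sc_mtx, &sc->sc_tag);
 *
 * This layer invokes load callbacks synchronously (see bus_dma_tag_create
 * below), so the lock functions are never actually called.
 */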


extern "C" int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary,
	bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t* filter,
	void* filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
	int flags, bus_dma_lock_t* lockfunc, void* lockfuncarg, bus_dma_tag_t* dmat)
{
	if (maxsegsz == 0)
		return EINVAL;
	if (filter != NULL) {
		panic("bus_dma_tag_create: error: filters not supported!");
		return EOPNOTSUPP;
	}

	bus_dma_tag_t newtag = (bus_dma_tag_t)kernel_malloc(sizeof(*newtag),
		M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL)
		return ENOMEM;

	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = lowaddr;
	newtag->highaddr = highaddr;
	newtag->maxsize = maxsize;
	newtag->maxsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;
	newtag->map_count = 0;
	// lockfunc is only needed if callbacks will be invoked asynchronously;
	// this implementation invokes them synchronously, so it is ignored.

	if (parent != NULL) {
		newtag->parent = parent;
		atomic_add(&parent->ref_count, 1);

		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary, newtag->boundary);
		}
	}

	if (newtag->lowaddr < vm_page_max_address())
		newtag->flags |= BUS_DMA_COULD_BOUNCE;
	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	*dmat = newtag;
	return 0;
}
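

/*
 * Illustration (hypothetical values): a device limited to 32-bit DMA
 * addresses excludes everything above 4GB via lowaddr:
 *
 *	bus_dma_tag_t tag;
 *	int error = bus_dma_tag_create(NULL, 4096, 0,
 *		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *		65536, 16, 4096, 0, NULL, NULL, &tag);
 *
 * On systems with physical memory above 4GB, such a tag is marked
 * BUS_DMA_COULD_BOUNCE, and loads may fall back to a bounce buffer.
 */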


extern "C" int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return 0;
	if (dmat->map_count != 0)
		return EBUSY;

	while (dmat != NULL) {
		bus_dma_tag_t parent = dmat->parent;

		// atomic_add() returns the previous value.
		if (atomic_add(&dmat->ref_count, -1) == 1) {
			kernel_free(dmat, M_DEVBUF);

			// Last reference released, so release our reference on our parent.
			dmat = parent;
		} else
			dmat = NULL;
	}
	return 0;
}


extern "C" int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t* mapp)
{
	*mapp = (bus_dmamap_t)calloc(1, sizeof(**mapp));
	if (*mapp == NULL)
		return ENOMEM;

	(*mapp)->dmat = dmat;
	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t*)calloc(dmat->maxsegments,
		sizeof(bus_dma_segment_t));
	if ((*mapp)->segments == NULL) {
		free(*mapp);
		*mapp = NULL;
		return ENOMEM;
	}

	atomic_add(&dmat->map_count, 1);
	return 0;
}


static int
_prepare_bounce_buffer(bus_dmamap_t map, bus_size_t reqsize, int flags)
{
	if (map->buffer_type == bus_dmamap::BUFFER_PROHIBITED) {
		panic("cannot bounce, direct DMA only!");
		return B_NOT_ALLOWED;
	}
	if (map->buffer_type != bus_dmamap::BUFFER_NONE) {
		panic("bounce buffer already in use!");
		return EBUSY;
	}

	if (map->bounce_buffer_size >= reqsize)
		return 0;

	if (map->bounce_buffer != NULL) {
		kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, 0);
		map->bounce_buffer = NULL;
	}

	bus_dmamap_t extraMap;
	int error = bus_dmamem_alloc(map->dmat, &map->bounce_buffer, flags, &extraMap);
	if (error != 0)
		return error; // TODO: retry with a smaller size?
	map->bounce_buffer_size = map->dmat->maxsize;
	bus_dmamap_destroy(map->dmat, extraMap);

	return 0;
}


extern "C" int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return 0;
	if (map->buffer_type > bus_dmamap::BUFFER_PROHIBITED)
		return EBUSY;

	atomic_add(&map->dmat->map_count, -1);
	kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, M_DEVBUF);
	free(map->segments);
	free(map);
	return 0;
}


extern "C" int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
	bus_dmamap_t* mapp)
{
	int mflags;
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	// Make sure *vaddr is initialized: not every path below sets it
	// before it is tested.
	*vaddr = NULL;

	// FreeBSD does not permit the "mapp" argument to be NULL, but we do.
	if (mapp != NULL) {
		int error = bus_dmamap_create(dmat, flags, mapp);
		if (error != 0)
			return error;

		// Drivers assume dmamem will never be bounced, so ensure that.
		(*mapp)->buffer_type = bus_dmamap::BUFFER_PROHIBITED;
	}

	// FreeBSD uses standard malloc() for the case where maxsize <= PAGE_SIZE,
	// but we want to keep DMA'd memory a bit more separate, so we always use
	// contigmalloc.

	// The range specified by lowaddr, highaddr is an *exclusion* range,
	// not an inclusion range. So we want to at least start with the low end,
	// if possible. (The most common exclusion range is 32-bit only, and
	// ones other than that are very rare, so typically this will succeed.)
	if (dmat->lowaddr > B_PAGE_SIZE) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			0, dmat->lowaddr,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
		if (*vaddr == NULL)
			dprintf("bus_dmamem_alloc: failed to allocate with lowaddr "
				"0x%" B_PRIxPHYSADDR "\n", dmat->lowaddr);
	}
	if (*vaddr == NULL && dmat->highaddr < BUS_SPACE_MAXADDR) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			dmat->highaddr, BUS_SPACE_MAXADDR,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
	}

	if (*vaddr == NULL) {
		dprintf("bus_dmamem_alloc: failed to allocate for tag (size %d, "
			"low 0x%" B_PRIxPHYSADDR ", high 0x%" B_PRIxPHYSADDR ", "
			"boundary 0x%" B_PRIxPHYSADDR ")\n",
			(int)dmat->maxsize, dmat->lowaddr, dmat->highaddr, dmat->boundary);
		return ENOMEM;
	} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
		dprintf("bus_dmamem_alloc: failed to align memory: wanted 0x%"
			B_PRIxPHYSADDR ", got 0x%" B_PRIxPHYSADDR "\n",
			dmat->alignment, vtophys(*vaddr));
		bus_dmamem_free(dmat, *vaddr, (mapp != NULL) ? *mapp : NULL);
		return ENOMEM;
	}
	return 0;
}
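

/*
 * Illustration: allocating a zeroed, DMA-safe descriptor ring together
 * with a map ("tag", "ring" and "ringMap" are hypothetical):
 *
 *	void* ring;
 *	bus_dmamap_t ringMap;
 *	if (bus_dmamem_alloc(tag, &ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 *			&ringMap) != 0)
 *		return ENOMEM;
 *	// ... use the ring; memory from here is never bounced ...
 *	bus_dmamem_free(tag, ring, ringMap);
 */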


extern "C" void
bus_dmamem_free_tagless(void* vaddr, size_t size)
{
	kernel_contigfree(vaddr, size, M_DEVBUF);
}


extern "C" void
bus_dmamem_free(bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_tagless(vaddr, dmat->maxsize);
	bus_dmamap_destroy(dmat, map);
}


static bool
_validate_address(bus_dma_tag_t dmat, bus_addr_t paddr, bool validate_alignment = true)
{
	// (lowaddr, highaddr] is an exclusion window, not an inclusion one.
	if (paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		return false;
	if (validate_alignment && !vm_addr_align_ok(paddr, dmat->alignment))
		return false;

	return true;
}


// Walk "buf" in physically contiguous chunks (bounded by page ends,
// maxsegsz, and the boundary), appending to or coalescing with the
// segments in "segs". Returns EFBIG if the buffer does not fit.
static int
_bus_load_buffer(bus_dma_tag_t dmat, void* buf, bus_size_t buflen,
	int flags, bus_addr_t& last_phys_addr, bus_dma_segment_t* segs,
	int& seg, bool first)
{
	vm_offset_t virtual_addr = (vm_offset_t)buf;
	const bus_addr_t boundary_mask = ~(dmat->boundary - 1);

	while (buflen > 0) {
		const bus_addr_t phys_addr = pmap_kextract(virtual_addr);

		bus_size_t segment_size = PAGESIZE - (phys_addr & PAGE_MASK);
		if (segment_size > buflen)
			segment_size = buflen;
		if (segment_size > dmat->maxsegsz)
			segment_size = dmat->maxsegsz;

		if (dmat->boundary > 0) {
			// Make sure we don't cross a boundary.
			bus_addr_t boundary_addr = (phys_addr + dmat->boundary) & boundary_mask;
			if (segment_size > (boundary_addr - phys_addr))
				segment_size = (boundary_addr - phys_addr);
		}

		// If possible, coalesce into the previous segment.
		if (!first && phys_addr == last_phys_addr
				&& (segs[seg].ds_len + segment_size) <= dmat->maxsegsz
				&& (dmat->boundary == 0
					|| (segs[seg].ds_addr & boundary_mask)
						== (phys_addr & boundary_mask))) {
			if (!_validate_address(dmat, phys_addr, false))
				return ERANGE;

			segs[seg].ds_len += segment_size;
		} else {
			if (first)
				first = false;
			else if (++seg >= dmat->maxsegments)
				break;

			if (!_validate_address(dmat, phys_addr))
				return ERANGE;

			segs[seg].ds_addr = phys_addr;
			segs[seg].ds_len = segment_size;
		}

		last_phys_addr = phys_addr + segment_size;
		virtual_addr += segment_size;
		buflen -= segment_size;
	}

	return (buflen != 0 ? EFBIG : 0);
}
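

/*
 * Worked example: with 4KB pages, a page-aligned 12KB buffer whose three
 * pages are physically contiguous yields a single 12KB segment, since each
 * page-sized piece starts exactly at last_phys_addr and is coalesced. If
 * the third page is physically elsewhere, the result is an 8KB segment
 * plus a 4KB one (still subject to maxsegsz and boundary above).
 */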


extern "C" int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
	bus_size_t buflen, bus_dmamap_callback_t *callback,
	void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, seg = 0;

	if (buflen > dmat->maxsize)
		return EINVAL;

	error = _bus_load_buffer(dmat, buf, buflen, flags,
		lastaddr, map->segments, seg, true);

	if (error != 0) {
		// Try again using a bounce buffer.
		error = _prepare_bounce_buffer(map, buflen, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_SIMPLE;
		map->buffer = buf;
		map->buffer_length = buflen;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, buflen, flags,
			lastaddr, map->segments, seg, true);
	}

	if (error)
		(*callback)(callback_arg, map->segments, 0, error);
	else
		(*callback)(callback_arg, map->segments, seg + 1, 0);

	// Only ENOMEM is returned to the caller; all other errors are reported
	// solely through the callback.
	if (error == ENOMEM)
		return error;
	return 0;
}
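

/*
 * Illustration: loads here always complete (or fail) synchronously, so a
 * driver can fetch the physical address through a minimal callback; the
 * helper and variables below are hypothetical:
 *
 *	static void
 *	_get_first_segment(void* arg, bus_dma_segment_t* segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t*)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_addr_t physAddr = 0;
 *	bus_dmamap_load(tag, map, buffer, length, _get_first_segment,
 *		&physAddr, BUS_DMA_NOWAIT);
 */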


extern "C" int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dma_segment_t* segs, int* _nsegs, int flags)
{
	M_ASSERTPKTHDR(mb);

	if (mb->m_pkthdr.len > dmat->maxsize)
		return EINVAL;

	int seg = 0, error = 0;
	bool first = true;
	bus_addr_t lastaddr = 0;
	flags |= BUS_DMA_NOWAIT;

	for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len <= 0)
			continue;

		error = _bus_load_buffer(dmat, m->m_data, m->m_len,
			flags, lastaddr, segs, seg, first);
		first = false;
	}

	if (error != 0) {
		// Try again using a bounce buffer.
		error = _prepare_bounce_buffer(map, mb->m_pkthdr.len, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_MBUF;
		map->mbuf = mb;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, mb->m_pkthdr.len, flags,
			lastaddr, segs, seg, true);
	}

	*_nsegs = seg + 1;
	return error;
}
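

/*
 * Illustration: a hypothetical transmit path gathering an outgoing packet
 * into hardware descriptors ("MAX_TX_SEGS" matching the tag's maxsegments;
 * "sc->tx_tag" and "buf->map" are driver-specific):
 *
 *	bus_dma_segment_t segs[MAX_TX_SEGS];
 *	int nsegs = 0;
 *	int error = bus_dmamap_load_mbuf_sg(sc->tx_tag, buf->map, m,
 *		segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		// program one descriptor per segs[i] ...
 *	}
 */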


extern "C" int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dmamap_callback2_t* callback, void* callback_arg, int flags)
{
	int nsegs, error;
	error = bus_dmamap_load_mbuf_sg(dmat, map, mb, map->segments, &nsegs, flags);

	if (error) {
		(*callback)(callback_arg, map->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, map->segments, nsegs, mb->m_pkthdr.len,
			error);
	}
	return error;
}


extern "C" void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	// The bounce buffer, if any, is kept around for reuse; it is only
	// freed in bus_dmamap_destroy().
	if (map->buffer_type != bus_dmamap::BUFFER_PROHIBITED)
		map->buffer_type = bus_dmamap::BUFFER_NONE;
	map->buffer = NULL;
}


extern "C" void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	bus_size_t length = 0;
	switch (map->buffer_type) {
		case bus_dmamap::BUFFER_NONE:
		case bus_dmamap::BUFFER_PROHIBITED:
			// Nothing to do.
			return;

		case bus_dmamap::BUFFER_TYPE_SIMPLE:
			length = map->buffer_length;
			break;

		case bus_dmamap::BUFFER_TYPE_MBUF:
			length = map->mbuf->m_pkthdr.len;
			break;

		default:
			panic("unknown buffer type");
	}

	bus_dmamap_sync_etc(dmat, map, 0, length, op);
}
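

/*
 * Illustration: the canonical ordering around a transfer. For maps that
 * were not bounced this is a no-op; for bounced maps it copies between
 * the caller's buffer and the bounce buffer with the proper barriers:
 *
 *	// CPU filled the buffer; sync before the device reads it.
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	// ... start the device transfer ...
 *
 *	// Device wrote the buffer; sync before the CPU reads it.
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 */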


extern "C" void
bus_dmamap_sync_etc(bus_dma_tag_t dmat, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t length, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		// "Pre-write": after CPU writes, before device reads.
		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length, ("mis-sized sync"));
				memcpy((caddr_t)map->bounce_buffer + offset,
					(caddr_t)map->buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copydata(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}

		memory_write_barrier();
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		// "Post-read": after device writes, before CPU reads.
		memory_read_barrier();

		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length, ("mis-sized sync"));
				memcpy((caddr_t)map->buffer + offset,
					(caddr_t)map->bounce_buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copyback(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}
	}
}
585