/*
 * Copyright 2019-2022, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 */

extern "C" {
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
#include <vm/vm_extern.h>
}

#include <vm/vm_page.h>


// #pragma mark - structures


struct bus_dma_tag {
	bus_dma_tag_t	parent;
	int32			ref_count;
	int32			map_count;

	int				flags;
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS1

	phys_size_t		alignment;
	phys_addr_t		boundary;
	phys_addr_t		lowaddr;
	phys_addr_t		highaddr;

	phys_size_t		maxsize;
	uint32			maxsegments;
	phys_size_t		maxsegsz;
};

struct bus_dmamap {
	bus_dma_tag_t		dmat;

	bus_dma_segment_t*	segments;
	int					nsegs;

	void*		bounce_buffer;
	bus_size_t	bounce_buffer_size;

	enum {
		BUFFER_NONE = 0,
		BUFFER_PROHIBITED,

		BUFFER_TYPE_SIMPLE,
		BUFFER_TYPE_MBUF,
	} buffer_type;
	union {
		struct {
			void*				buffer;
			bus_size_t			buffer_length;
		};
		struct mbuf*		mbuf;
	};
};


// #pragma mark - functions

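// Generic lock/unlock helper that drivers may pass as the "lockfunc"
// argument to bus_dma_tag_create(); "arg" must be an initialized mtx.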
extern "C" void
busdma_lock_mutex(void* arg, bus_dma_lock_op_t op)
{
	struct mtx* dmtx = (struct mtx*)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("busdma_lock_mutex: unknown operation 0x%x", op);
	}
}

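// Create a DMA tag describing a device's constraints (alignment, boundary,
// addressable range, segment count and size). If "parent" is non-NULL, its
// constraints are inherited, keeping the stricter of the two. Filter
// callbacks are not supported. A typical call from a driver looks roughly
// like this (the softc fields are hypothetical):
//
//	bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT,
//		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
//		NULL, NULL, &sc->dma_tag);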
extern "C" int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary,
	bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t* filter,
	void* filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
	int flags, bus_dma_lock_t* lockfunc, void* lockfuncarg, bus_dma_tag_t* dmat)
{
	if (maxsegsz == 0)
		return EINVAL;
	if (filter != NULL) {
		panic("bus_dma_tag_create: error: filters not supported!");
		return EOPNOTSUPP;
	}

	bus_dma_tag_t newtag = (bus_dma_tag_t)kernel_malloc(sizeof(*newtag),
		M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL)
		return ENOMEM;

	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = lowaddr;
	newtag->highaddr = highaddr;
	newtag->maxsize = maxsize;
	newtag->maxsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1;
	newtag->map_count = 0;

	// lockfunc is only needed if callbacks will be invoked asynchronously.

	if (parent != NULL) {
		newtag->parent = parent;
		atomic_add(&parent->ref_count, 1);

		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = MIN(parent->boundary, newtag->boundary);
		}
	}

	if (newtag->lowaddr < vm_page_max_address())
		newtag->flags |= BUS_DMA_COULD_BOUNCE;
	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	*dmat = newtag;
	return 0;
}

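// Drop a reference to the tag; when the last reference goes away, free the
// tag and release the reference it held on its parent, recursively. Fails
// with EBUSY while maps created from the tag still exist.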
extern "C" int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return 0;
	if (dmat->map_count != 0)
		return EBUSY;

	while (dmat != NULL) {
		bus_dma_tag_t parent;

		parent = dmat->parent;
		atomic_add(&dmat->ref_count, -1);
		if (dmat->ref_count == 0) {
			kernel_free(dmat, M_DEVBUF);

			// Last reference released, so release our reference on our parent.
			dmat = parent;
		} else
			dmat = NULL;
	}
	return 0;
}

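// Allocate a map, along with a segment array sized for the tag's maximum
// segment count.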
extern "C" int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t* mapp)
{
	*mapp = (bus_dmamap_t)calloc(sizeof(**mapp), 1);
	if (*mapp == NULL)
		return ENOMEM;

	(*mapp)->dmat = dmat;
	(*mapp)->nsegs = 0;
	(*mapp)->segments = (bus_dma_segment_t*)calloc(dmat->maxsegments,
		sizeof(bus_dma_segment_t));
	if ((*mapp)->segments == NULL) {
		free(*mapp);
		*mapp = NULL;
		return ENOMEM;
	}

	atomic_add(&dmat->map_count, 1);
	return 0;
}

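// Make sure the map owns a bounce buffer of at least "reqsize" bytes,
// allocating (or replacing) one via bus_dmamem_alloc() as needed. Fails if
// bouncing is prohibited for this map or a bounced load is already active.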
static int
_prepare_bounce_buffer(bus_dmamap_t map, bus_size_t reqsize, int flags)
{
	if (map->buffer_type == bus_dmamap::BUFFER_PROHIBITED) {
		panic("cannot bounce, direct DMA only!");
		return B_NOT_ALLOWED;
	}
	if (map->buffer_type != bus_dmamap::BUFFER_NONE) {
		panic("bounce buffer already in use!");
		return EBUSY;
	}

	if (map->bounce_buffer_size >= reqsize)
		return 0;

	if (map->bounce_buffer != NULL) {
		kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, 0);
		map->bounce_buffer = NULL;
	}

	bus_dmamap_t extraMap;
	int error = bus_dmamem_alloc(map->dmat, &map->bounce_buffer, flags, &extraMap);
	if (error != 0)
		return error; // TODO: retry with a smaller size?
	map->bounce_buffer_size = map->dmat->maxsize;
	bus_dmamap_destroy(map->dmat, extraMap);

	return 0;
}

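// Tear down a map, freeing its segment array and any bounce buffer. Fails
// with EBUSY while a buffer is still loaded into the map.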
extern "C" int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return 0;
	if (map->buffer_type > bus_dmamap::BUFFER_PROHIBITED)
		return EBUSY;

	atomic_add(&map->dmat->map_count, -1);
	kernel_contigfree(map->bounce_buffer, map->bounce_buffer_size, M_DEVBUF);
	free(map->segments);
	free(map);
	return 0;
}

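// Allocate "maxsize" bytes of contiguous memory satisfying the tag's
// constraints, along with a map for it. Memory obtained this way is never
// bounced, which drivers rely on.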
extern "C" int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
	bus_dmamap_t* mapp)
{
	int mflags;
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	bus_dmamap_create(dmat, flags, mapp);

	// Drivers assume dmamem will never be bounced, so ensure that.
	(*mapp)->buffer_type = bus_dmamap::BUFFER_PROHIBITED;

	// FreeBSD uses standard malloc() for the case where maxsize <= PAGE_SIZE,
	// but we want to keep DMA'd memory a bit more separate, so we always use
	// contigmalloc.

	// The range specified by lowaddr, highaddr is an *exclusion* range,
	// not an inclusion range. So we want to at least start with the low end,
	// if possible. (The most common exclusion range is 32-bit only, and
	// ones other than that are very rare, so typically this will succeed.)
	*vaddr = NULL;
	if (dmat->lowaddr > B_PAGE_SIZE) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			0, dmat->lowaddr,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
		if (*vaddr == NULL)
			dprintf("bus_dmamem_alloc: failed to allocate with lowaddr "
				"0x%" B_PRIxPHYSADDR "\n", dmat->lowaddr);
	}
	if (*vaddr == NULL && dmat->highaddr < BUS_SPACE_MAXADDR) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			dmat->highaddr, BUS_SPACE_MAXADDR,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
	}

	if (*vaddr == NULL) {
		dprintf("bus_dmamem_alloc: failed to allocate for tag (size %d, "
			"low 0x%" B_PRIxPHYSADDR ", high 0x%" B_PRIxPHYSADDR ", "
			"boundary 0x%" B_PRIxPHYSADDR ")\n",
			(int)dmat->maxsize, dmat->lowaddr, dmat->highaddr, dmat->boundary);
		return ENOMEM;
	} else if (dmat->alignment > 1
			&& (vtophys(*vaddr) & (dmat->alignment - 1)) != 0) {
		dprintf("bus_dmamem_alloc: failed to align memory: wanted 0x%"
			B_PRIxPHYSADDR ", got 0x%" B_PRIxPHYSADDR "\n",
			(phys_addr_t)dmat->alignment, vtophys(*vaddr));
		bus_dmamem_free(dmat, *vaddr, *mapp);
		return ENOMEM;
	}
	return 0;
}

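// Free memory and the map obtained from bus_dmamem_alloc().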
extern "C" void
bus_dmamem_free(bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map)
{
	kernel_contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	bus_dmamap_destroy(dmat, map);
}

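// Check that a physical address is usable under the tag's constraints: it
// must fall outside the exclusion window (lowaddr, highaddr] and, optionally,
// satisfy the tag's alignment.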
static bool
_validate_address(bus_dma_tag_t dmat, bus_addr_t paddr, bool validate_alignment = true)
{
	if (paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		return false;
	if (validate_alignment && !vm_addr_align_ok(paddr, dmat->alignment))
		return false;

	return true;
}

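// Walk the buffer page by page, translating virtual to physical addresses
// and appending to the segment array. Segments are split at page, maxsegsz
// and boundary limits, and physically contiguous runs are coalesced into the
// previous segment where possible. Returns ERANGE if an address violates the
// tag's constraints, EFBIG if the buffer needs more than maxsegments.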
static int
_bus_load_buffer(bus_dma_tag_t dmat, void* buf, bus_size_t buflen,
	int flags, bus_addr_t& last_phys_addr, bus_dma_segment_t* segs,
	int& seg, bool first)
{
	vm_offset_t virtual_addr = (vm_offset_t)buf;
	const bus_addr_t boundary_mask = ~(dmat->boundary - 1);

	while (buflen > 0) {
		const bus_addr_t phys_addr = pmap_kextract(virtual_addr);

		bus_size_t segment_size = PAGE_SIZE - (phys_addr & PAGE_MASK);
		if (segment_size > buflen)
			segment_size = buflen;
		if (segment_size > dmat->maxsegsz)
			segment_size = dmat->maxsegsz;

		if (dmat->boundary > 0) {
			// Make sure we don't cross a boundary.
			bus_addr_t boundary_addr = (phys_addr + dmat->boundary) & boundary_mask;
			if (segment_size > (boundary_addr - phys_addr))
				segment_size = (boundary_addr - phys_addr);
		}

		// If possible, coalesce into the previous segment.
		if (!first && phys_addr == last_phys_addr
				&& (segs[seg].ds_len + segment_size) <= dmat->maxsegsz
				&& (dmat->boundary == 0
					|| (segs[seg].ds_addr & boundary_mask)
						== (phys_addr & boundary_mask))) {
			if (!_validate_address(dmat, phys_addr, false))
				return ERANGE;

			segs[seg].ds_len += segment_size;
		} else {
			if (first)
				first = false;
			else if (++seg >= dmat->maxsegments)
				break;

			if (!_validate_address(dmat, phys_addr))
				return ERANGE;

			segs[seg].ds_addr = phys_addr;
			segs[seg].ds_len = segment_size;
		}

		last_phys_addr = phys_addr + segment_size;
		virtual_addr += segment_size;
		buflen -= segment_size;
	}

	return (buflen != 0 ? EFBIG : 0);
}

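// Load a buffer into the map for DMA. If it cannot be mapped directly
// (wrong alignment or address range, or too many segments), the load is
// retried through a bounce buffer. The resulting segment list is handed to
// the callback, which is invoked synchronously before this returns.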
extern "C" int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
	bus_size_t buflen, bus_dmamap_callback_t *callback,
	void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, seg = 0;

	if (buflen > dmat->maxsize)
		return EINVAL;

	error = _bus_load_buffer(dmat, buf, buflen, flags,
		lastaddr, map->segments, seg, true);

	if (error != 0) {
		// Try again using a bounce buffer.
		error = _prepare_bounce_buffer(map, buflen, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_SIMPLE;
		map->buffer = buf;
		map->buffer_length = buflen;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, buflen, flags,
			lastaddr, map->segments, seg, true);
	}

	if (error)
		(*callback)(callback_arg, map->segments, 0, error);
	else
		(*callback)(callback_arg, map->segments, seg + 1, 0);

	// Only ENOMEM is returned directly; all other errors are reported
	// through the callback alone.
	if (error == ENOMEM)
		return error;
	return 0;
}

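// Load a packet mbuf chain into the provided segment array, bouncing the
// whole packet if any part of it cannot be mapped directly. Always operates
// in no-wait mode.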
extern "C" int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dma_segment_t* segs, int* _nsegs, int flags)
{
	M_ASSERTPKTHDR(mb);

	if (mb->m_pkthdr.len > dmat->maxsize)
		return EINVAL;

	int seg = 0, error = 0;
	bool first = true;
	bus_addr_t lastaddr = 0;
	flags |= BUS_DMA_NOWAIT;

	for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len <= 0)
			continue;

		error = _bus_load_buffer(dmat, m->m_data, m->m_len,
			flags, lastaddr, segs, seg, first);
		first = false;
	}

	if (error != 0) {
		// Try again using a bounce buffer.
		error = _prepare_bounce_buffer(map, mb->m_pkthdr.len, flags);
		if (error != 0)
			return error;

		map->buffer_type = bus_dmamap::BUFFER_TYPE_MBUF;
		map->mbuf = mb;

		seg = lastaddr = 0;
		error = _bus_load_buffer(dmat, map->bounce_buffer, mb->m_pkthdr.len, flags,
			lastaddr, segs, seg, true);
	}

	*_nsegs = seg + 1;
	return error;
}

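// Callback-style wrapper around bus_dmamap_load_mbuf_sg(), mirroring
// bus_dmamap_load().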
extern "C" int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dmamap_callback2_t* callback, void* callback_arg, int flags)
{
	int nsegs, error;
	error = bus_dmamap_load_mbuf_sg(dmat, map, mb, map->segments, &nsegs, flags);

	if (error) {
		(*callback)(callback_arg, map->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, map->segments, nsegs, mb->m_pkthdr.len,
			error);
	}
	return error;
}

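// Mark the map as no longer loaded. Any bounce buffer is kept around for
// reuse by the next load.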
extern "C" void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	if (map->buffer_type != bus_dmamap::BUFFER_PROHIBITED)
		map->buffer_type = bus_dmamap::BUFFER_NONE;
	map->buffer = NULL;
}

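// Synchronize the entire loaded buffer with its bounce buffer (if any);
// see bus_dmamap_sync_etc() below for the sub-range variant.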
extern "C" void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	bus_size_t length = 0;
	switch (map->buffer_type) {
		case bus_dmamap::BUFFER_NONE:
		case bus_dmamap::BUFFER_PROHIBITED:
			// Nothing to do.
			return;

		case bus_dmamap::BUFFER_TYPE_SIMPLE:
			length = map->buffer_length;
			break;

		case bus_dmamap::BUFFER_TYPE_MBUF:
			length = map->mbuf->m_pkthdr.len;
			break;

		default:
			panic("unknown buffer type");
	}

	bus_dmamap_sync_etc(dmat, map, 0, length, op);
}

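// Variant of bus_dmamap_sync() that synchronizes only a sub-range of the
// mapped buffer. For bounced maps, PREWRITE copies the driver's data into
// the bounce buffer before the device reads it, and POSTREAD copies
// device-written data back out; the matching memory barrier is issued in
// both cases, even when no copy is needed.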
extern "C" void
bus_dmamap_sync_etc(bus_dma_tag_t dmat, bus_dmamap_t map,
	bus_addr_t offset, bus_size_t length, bus_dmasync_op_t op)
{
	if (map == NULL)
		return;

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		// "Pre-write": after CPU writes, before device reads.
		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length, ("mis-sized sync"));
				memcpy((caddr_t)map->bounce_buffer + offset,
					(caddr_t)map->buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copydata(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}

		memory_write_barrier();
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		// "Post-read": after device writes, before CPU reads.
		memory_read_barrier();

		switch (map->buffer_type) {
			case bus_dmamap::BUFFER_NONE:
			case bus_dmamap::BUFFER_PROHIBITED:
				// Nothing to do.
				break;

			case bus_dmamap::BUFFER_TYPE_SIMPLE:
				KASSERT((offset + length) <= map->buffer_length, ("mis-sized sync"));
				memcpy((caddr_t)map->buffer + offset,
					(caddr_t)map->bounce_buffer + offset, length);
				break;

			case bus_dmamap::BUFFER_TYPE_MBUF:
				m_copyback(map->mbuf, offset, length,
					(caddr_t)map->bounce_buffer + offset);
				break;

			default:
				panic("unknown buffer type");
		}
	}
}
575