xref: /haiku/src/libs/compat/freebsd_network/bus_dma.cpp (revision 15fb7d88e971c4d6c787c6a3a5c159afb1ebf77b)
/*
 * Copyright 2019, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 */

extern "C" {
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>

#include <machine/bus.h>
}


// #pragma mark - structures


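// A DMA tag captures the constraints that allocations and mappings made
// through it must honor. Note that (lowaddr, highaddr) describes an
// *exclusion* window, not an inclusion range (see bus_dmamem_alloc below),
// and that a child tag inherits and tightens its parent's constraints
// (see bus_dma_tag_create).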
struct bus_dma_tag {
	bus_dma_tag_t	parent;
	phys_size_t		alignment;
	phys_addr_t		boundary;
	phys_addr_t		lowaddr;
	phys_addr_t		highaddr;
	bus_dma_filter_t* filter;
	void*			filterarg;
	phys_size_t		maxsize;
	uint32			maxsegments;
	bus_dma_segment_t* segments;
	phys_size_t		maxsegsz;
	int32			ref_count;
};


// #pragma mark - functions


void
busdma_lock_mutex(void* arg, bus_dma_lock_op_t op)
{
	struct mtx* dmtx = (struct mtx*)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("busdma_lock_mutex: unknown operation 0x%x", op);
	}
}
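
// Example (hypothetical driver code, not part of this file; sc_mtx/sc_tag
// are assumed softc fields): a driver that wants deferred-load callbacks
// serialized against its own lock passes this helper, together with its
// mutex, as the lockfunc/lockfuncarg pair:
//
//	bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
//		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
//		MCLBYTES, 1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
//		&sc->sc_tag);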


int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_size_t boundary,
	bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t* filter,
	void* filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
	int flags, bus_dma_lock_t* lockfunc, void* lockfuncarg, bus_dma_tag_t* dmat)
{
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	*dmat = NULL;

	bus_dma_tag_t newtag = (bus_dma_tag_t)kernel_malloc(sizeof(*newtag),
		M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL)
		return ENOMEM;

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = lowaddr;
	newtag->highaddr = highaddr;
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->maxsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->ref_count = 1;

	if (newtag->parent != NULL) {
		atomic_add(&parent->ref_count, 1);

		// Take the more restrictive of our own and our parent's constraints.
		newtag->lowaddr = min_c(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max_c(parent->highaddr, newtag->highaddr);

		if (newtag->boundary == 0) {
			newtag->boundary = parent->boundary;
		} else if (parent->boundary != 0) {
			newtag->boundary = min_c(parent->boundary, newtag->boundary);
		}

		if (newtag->filter == NULL) {
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
		}
	}

	if (newtag->filter != NULL)
		panic("bus_dma_tag_create: error: filters not implemented!");

	*dmat = newtag;
	return 0;
}
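
// For example (illustrative values): a tag created with lowaddr =
// BUS_SPACE_MAXADDR under a parent whose lowaddr is BUS_SPACE_MAXADDR_32BIT
// inherits the parent's stricter 32-bit limit, since min_c() keeps the
// lower (more restrictive) cutoff.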


int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return 0;

	while (dmat != NULL) {
		bus_dma_tag_t parent = dmat->parent;

		// atomic_add() returns the value the counter held *before* the
		// decrement, so a return value of 1 means we just dropped the
		// last reference.
		if (atomic_add(&dmat->ref_count, -1) == 1) {
			kernel_free(dmat->segments, M_DEVBUF);
			kernel_free(dmat, M_DEVBUF);

			// Last reference released, so release our reference on our parent.
			dmat = parent;
		} else
			dmat = NULL;
	}
	return 0;
}


int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t* mapp)
{
	// We never bounce, so we do not need maps.
	*mapp = NULL;
	return 0;
}


int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	// We never create maps, so there should be none to destroy.
	if (map != NULL)
		panic("bus_dmamap_destroy: map is not NULL!");
	return 0;
}


int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
	bus_dmamap_t* mapp)
{
	int mflags;
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	// We never need to map/bounce.
	*mapp = NULL;

	// FreeBSD uses standard malloc() for the case where maxsize <= PAGE_SIZE;
	// however, our malloc() makes no guarantee that the allocated memory will
	// not be swapped out, which obviously is a requirement here. So we must
	// always use kernel_contigmalloc().

	// The range specified by (lowaddr, highaddr) is an *exclusion* range,
	// not an inclusion range. So we want to at least start with the low end,
	// if possible. (The most common exclusion range is 32-bit only,
	// and ones other than that are very rare, so typically this will
	// succeed.)
	*vaddr = NULL;
	if (dmat->lowaddr > B_PAGE_SIZE) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			0, dmat->lowaddr,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
		if (*vaddr == NULL)
			dprintf("bus_dmamem_alloc: failed to allocate with lowaddr "
				"0x%" B_PRIxPHYSADDR "\n", dmat->lowaddr);
	}
	if (*vaddr == NULL && dmat->highaddr < BUS_SPACE_MAXADDR) {
		*vaddr = kernel_contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
			dmat->highaddr, BUS_SPACE_MAXADDR,
			dmat->alignment ? dmat->alignment : 1ul, dmat->boundary);
	}

	if (*vaddr == NULL) {
		dprintf("bus_dmamem_alloc: failed to allocate for tag (size %d, "
			"low 0x%" B_PRIxPHYSADDR ", high 0x%" B_PRIxPHYSADDR ", "
			"boundary 0x%" B_PRIxPHYSADDR ")\n",
			(int)dmat->maxsize, dmat->lowaddr, dmat->highaddr, dmat->boundary);
		return ENOMEM;
	} else if (dmat->alignment != 0
			&& (vtophys(*vaddr) & (dmat->alignment - 1)) != 0) {
		dprintf("bus_dmamem_alloc: failed to align memory: wanted 0x%"
			B_PRIxPHYSADDR ", got 0x%" B_PRIxPHYSADDR "\n",
			dmat->alignment, vtophys(*vaddr));
		bus_dmamem_free(dmat, *vaddr, *mapp);
		return ENOMEM;
	}
	return 0;
}


void
bus_dmamem_free(bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map)
{
	// We never bounce, so map should be NULL.
	if (map != NULL)
		panic("bus_dmamem_free: map is not NULL!");

	kernel_contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}
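
// Example (hypothetical, illustrative values; constants as in FreeBSD's
// <machine/bus.h>): allocating and releasing a zeroed, 16-byte-aligned
// DMA buffer that must reside below 4GB:
//
//	bus_dma_tag_t tag;
//	bus_dmamap_t map;
//	void* buf;
//	if (bus_dma_tag_create(NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT,
//			BUS_SPACE_MAXADDR, NULL, NULL, 4096, 1, 4096, 0, NULL, NULL,
//			&tag) == 0
//		&& bus_dmamem_alloc(tag, &buf, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
//			&map) == 0) {
//		// ... use buf ...
//		bus_dmamem_free(tag, buf, map);
//	}
//	bus_dma_tag_destroy(tag);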


static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t /* map */, void* buf,
	bus_size_t buflen, int flags, bus_addr_t* lastaddrp, bus_dma_segment_t* segs,
	int& seg, bool first)
{
	vm_offset_t virtual_addr = (vm_offset_t)buf;
	bus_addr_t last_phys_addr = *lastaddrp;
	const bus_addr_t boundary_mask = ~(dmat->boundary - 1);

	while (buflen > 0) {
		const bus_addr_t phys_addr = pmap_kextract(virtual_addr);

		bus_size_t segment_size = B_PAGE_SIZE - (phys_addr & (B_PAGE_SIZE - 1));
		if (segment_size > buflen)
			segment_size = buflen;

		if (dmat->boundary > 0) {
			// Make sure we don't cross a boundary.
			bus_addr_t boundary_addr = (phys_addr + dmat->boundary) & boundary_mask;
			if (segment_size > (boundary_addr - phys_addr))
				segment_size = (boundary_addr - phys_addr);
		}

		// Insert chunk into a segment.
		if (first) {
			segs[seg].ds_addr = phys_addr;
			segs[seg].ds_len = segment_size;
			first = false;
		} else {
			// If possible, coalesce into the previous segment.
			if (phys_addr == last_phys_addr
					&& (segs[seg].ds_len + segment_size) <= dmat->maxsegsz
					&& (dmat->boundary == 0
						|| (segs[seg].ds_addr & boundary_mask)
							== (phys_addr & boundary_mask))) {
				segs[seg].ds_len += segment_size;
			} else {
				if (++seg >= dmat->maxsegments)
					break;
				segs[seg].ds_addr = phys_addr;
				segs[seg].ds_len = segment_size;
			}
		}

		last_phys_addr = phys_addr + segment_size;
		virtual_addr += segment_size;
		buflen -= segment_size;
	}

	*lastaddrp = last_phys_addr;
	return (buflen != 0 ? EFBIG : 0);
}
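
// For example (B_PAGE_SIZE = 4096): loading a 6144-byte buffer whose first
// byte sits 512 bytes into a page produces two chunks of 3584 and 2560
// bytes. If the second page is physically contiguous with the first, the
// chunks coalesce into a single 6144-byte segment (provided maxsegsz and
// boundary allow it); otherwise they become two separate segments.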


int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
	bus_size_t buflen, bus_dmamap_callback_t *callback,
	void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t*)kernel_malloc(
			sizeof(bus_dma_segment_t) * dmat->maxsegments, M_DEVBUF,
			M_ZERO | M_NOWAIT);
		if (dmat->segments == NULL)
			return ENOMEM;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, flags,
		&lastaddr, dmat->segments, nsegs, true);

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	// Only ENOMEM is returned to the caller; all other errors are reported
	// through the callback alone.
	if (error == ENOMEM)
		return error;
	return 0;
}
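
// Example (hypothetical): the common FreeBSD idiom of fetching the physical
// address of a buffer known to be physically contiguous, using a trivial
// callback:
//
//	static void
//	dma_map_addr(void* arg, bus_dma_segment_t* segs, int nseg, int error)
//	{
//		if (error == 0 && nseg == 1)
//			*(bus_addr_t*)arg = segs[0].ds_addr;
//	}
//	...
//	bus_addr_t physaddr = 0;
//	bus_dmamap_load(tag, map, buf, size, dma_map_addr, &physaddr,
//		BUS_DMA_NOWAIT);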


int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dmamap_callback2_t* callback, void* callback_arg, int flags)
{
	M_ASSERTPKTHDR(mb);

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t*)kernel_malloc(
			sizeof(bus_dma_segment_t) * dmat->maxsegments, M_DEVBUF,
			M_ZERO | M_NOWAIT);
		if (dmat->segments == NULL)
			return ENOMEM;
	}

	int nsegs = 0, error = 0;
	if (mb->m_pkthdr.len <= dmat->maxsize) {
		bool first = true;
		bus_addr_t lastaddr = 0;
		for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len <= 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
				flags, &lastaddr, dmat->segments, nsegs, first);
			first = false;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments, nsegs + 1, mb->m_pkthdr.len,
			error);
	}
	return error;
}


int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf* mb,
	bus_dma_segment_t* segs, int* nsegs, int flags)
{
	M_ASSERTPKTHDR(mb);

	*nsegs = 0;
	int error = 0;
	if (mb->m_pkthdr.len <= dmat->maxsize) {
		bool first = true;
		bus_addr_t lastaddr = 0;

		for (struct mbuf* m = mb; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len <= 0)
				continue;

			error = _bus_dmamap_load_buffer(dmat, map, m->m_data, m->m_len,
				flags, &lastaddr, segs, *nsegs, first);
			first = false;
		}
	} else {
		error = EINVAL;
	}

	// *nsegs holds the index of the last segment written; turn it into
	// a segment count.
	++*nsegs;
	return error;
}
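
// Example (hypothetical; MAX_TX_SEGS is an assumed driver constant): a
// typical transmit path loads an mbuf chain directly into a driver-owned
// segment array and then programs one hardware descriptor per returned
// segment:
//
//	bus_dma_segment_t segs[MAX_TX_SEGS];
//	int nsegs;
//	if (bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs,
//			BUS_DMA_NOWAIT) == 0) {
//		for (int i = 0; i < nsegs; i++) {
//			// write segs[i].ds_addr / segs[i].ds_len into descriptor i
//		}
//	}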


void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	// We never allocate bounce pages; nothing to do.
}


void
_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_dmasync_op_t)
{
	// We never bounce; nothing to do.
}
389