/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/uipc_mbuf.c 331847 2018-03-31 17:28:30Z avos $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/protosw.h>
#include <sys/uio.h>

#if 0
SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "void*", "void*");

SDT_PROBE_DEFINE(sdt, , , m__cljset);

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
    "struct mbuf *", "mbufinfo_t *");

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#ifndef __HAIKU__
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 48);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 28);
#endif
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	volatile u_int *refcnt;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	n->m_ext = m->m_ext;
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & M_RDONLY;

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

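	/*
	 * If we hold the only reference, no other thread can be updating
	 * the count concurrently, so a plain increment suffices and the
	 * cost of an atomic operation is avoided.
	 */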
	if (*refcnt == 1)
		*refcnt += 1;
	else
		atomic_add_int(refcnt, 1);
}

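/*
 * Strip the packet header from an mbuf: delete all tags, clear
 * M_PKTHDR and zero the header itself.
 */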
void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up an mbuf (chain), removing any tags and packet headers.
 * If "all" is set, the first mbuf in the chain is cleaned too;
 * otherwise it is left alone.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
	}
}

/*
 * Sanity checks on an mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 1 if all tests pass; otherwise returns 0 or panics.
 * If "sanitize" is 0, run M_SANITY_ACTION on failure; if 1, garble
 * the offending fields so they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}

/*
 * Non-inlined part of m_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

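/*
 * Hypothetical usage sketch: callers normally go through the
 * M_PREPEND() macro, which falls back to this function only when the
 * first mbuf lacks enough leading space:
 *
 *	M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */
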
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

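/*
 * Hypothetical usage sketch: pull a fixed-size header out of a chain
 * regardless of how the bytes are split across mbufs:
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, hdr_off, sizeof(th), (caddr_t)&th);
 */
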
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}

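/*
 * Trim "req_len" bytes from the mbuf chain: from the head if req_len
 * is positive, from the tail if it is negative.  The packet header
 * length, if present, is adjusted accordingly.
 */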
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

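/*
 * Hypothetical usage sketch: make sure an IP header is contiguous
 * before dereferencing it; on failure the chain has already been
 * freed:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */
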
/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of
 * the target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MHLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

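/*
 * Hypothetical usage sketch: append a small option structure, bailing
 * out if mbuf allocation fails partway through:
 *
 *	if (!m_append(m, sizeof(opt), (c_caddr_t)&opt))
 *		goto fail;
 */
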
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

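/*
 * Debugging aid: dump an mbuf chain to the console, printing at most
 * "maxlen" bytes of data per mbuf (-1 for no limit).
 */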
void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

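/*
 * Recompute m_pkthdr.len from the actual chain length and return it.
 */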
u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

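/*
 * Return the total number of data bytes in the chain; if "last" is
 * non-NULL, also return a pointer to the last mbuf through it.
 */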
u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If an mbuf without a packet header is passed in, the original
 * chain will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}

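/*
 * Hypothetical usage sketch (a common driver idiom): when a DMA map
 * load fails because the chain has too many segments, defragment and
 * retry:
 *
 *	if (error == EFBIG) {
 *		struct mbuf *n = m_defrag(m, M_NOWAIT);
 *		if (n == NULL) {
 *			m_freem(m);
 *			return (ENOBUFS);
 *		}
 *		m = n;
 *	}
 */
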
/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			    n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			    n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 255)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_first, *m_last;
	int divisor = 255, progress = 0, fraglen;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if (length == 0 || length < -2)
		return (m0);
	if (length > MCLBYTES)
		length = MCLBYTES;
	if (length < 0 && divisor > MCLBYTES)
		divisor = MCLBYTES;
	if (length == -1)
		length = 1 + (arc4random() % divisor);
	if (length > 0)
		fraglen = length;

	m_fixhdr(m0); /* Needed sanity check */

	m_first = m_getcl(how, MT_DATA, M_PKTHDR);
	if (m_first == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_first, m0, how) == 0)
		goto nospace;

	m_last = m_first;

	while (progress < m0->m_pkthdr.len) {
		if (length == -2)
			fraglen = 1 + (arc4random() % divisor);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (progress != 0) {
			struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;

			m_last->m_next = m_new;
			m_last = m_new;
		}

		m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
		progress += fraglen;
		m_last->m_len = fraglen;
	}
	m_freem(m0);
	m0 = m_first;
	return (m0);
nospace:
	if (m_first)
		m_freem(m_first);
	/* Return the original chain on failure */
	return (m0);
}

#endif

#ifndef __HAIKU__
/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}
#endif

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow.*/
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* strlen("16,000,000,000,000,000,000") == 21 */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}


SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
#endif
