Annotation of src/sys/kern/uipc_mbuf.c, Revision 1.48
1.48 ! itojun 1: /* $NetBSD: uipc_mbuf.c,v 1.47 2000/06/27 17:41:44 mrg Exp $ */
1.42 thorpej 2:
3: /*-
4: * Copyright (c) 1999 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9: * NASA Ames Research Center.
10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.10 cgd 39:
1.1 cgd 40: /*
1.9 mycroft 41: * Copyright (c) 1982, 1986, 1988, 1991, 1993
42: * The Regents of the University of California. All rights reserved.
1.1 cgd 43: *
44: * Redistribution and use in source and binary forms, with or without
45: * modification, are permitted provided that the following conditions
46: * are met:
47: * 1. Redistributions of source code must retain the above copyright
48: * notice, this list of conditions and the following disclaimer.
49: * 2. Redistributions in binary form must reproduce the above copyright
50: * notice, this list of conditions and the following disclaimer in the
51: * documentation and/or other materials provided with the distribution.
52: * 3. All advertising materials mentioning features or use of this software
53: * must display the following acknowledgement:
54: * This product includes software developed by the University of
55: * California, Berkeley and its contributors.
56: * 4. Neither the name of the University nor the names of its contributors
57: * may be used to endorse or promote products derived from this software
58: * without specific prior written permission.
59: *
60: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
61: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
62: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
63: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
64: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
65: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
66: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70: * SUCH DAMAGE.
71: *
1.26 fvdl 72: * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
1.1 cgd 73: */
1.24 mrg 74:
1.6 mycroft 75: #include <sys/param.h>
76: #include <sys/systm.h>
77: #include <sys/proc.h>
78: #include <sys/malloc.h>
1.9 mycroft 79: #include <sys/map.h>
1.1 cgd 80: #define MBTYPES
1.6 mycroft 81: #include <sys/mbuf.h>
82: #include <sys/kernel.h>
83: #include <sys/syslog.h>
84: #include <sys/domain.h>
85: #include <sys/protosw.h>
1.28 thorpej 86: #include <sys/pool.h>
1.27 matt 87: #include <sys/socket.h>
88: #include <net/if.h>
1.14 christos 89:
1.23 mrg 90: #include <uvm/uvm_extern.h>
91:
1.42 thorpej 92: #include <sys/sysctl.h>
93:
1.28 thorpej 94: struct pool mbpool; /* mbuf pool */
95: struct pool mclpool; /* mbuf cluster pool */
96:
1.18 thorpej 97: struct mbstat mbstat;
98: int max_linkhdr;
99: int max_protohdr;
100: int max_hdr;
101: int max_datalen;
102:
1.28 thorpej 103: void *mclpool_alloc __P((unsigned long, int, int));
104: void mclpool_release __P((void *, unsigned long, int));
1.44 itojun 105: static struct mbuf *m_copym0 __P((struct mbuf *, int, int, int, int));
1.28 thorpej 106:
1.42 thorpej 107: const char *mclpool_warnmsg =
108: "WARNING: mclpool limit reached; increase NMBCLUSTERS";
109:
/*
 * Initialize the mbuf allocator: create the mbuf and mbuf-cluster
 * pools and configure their hard limit and low water marks.
 * Called once at kernel startup.
 */
void
mbinit()
{

	/* mbufs come from mbpool; cluster pages come from mclpool via
	 * the mclpool_alloc/mclpool_release back-end below. */
	pool_init(&mbpool, msize, 0, 0, 0, "mbpl", 0, NULL, NULL, 0);
	pool_init(&mclpool, mclbytes, 0, 0, 0, "mclpl", 0, mclpool_alloc,
	    mclpool_release, 0);

	/*
	 * Set the hard limit on the mclpool to the number of
	 * mbuf clusters the kernel is to support.  Log the limit
	 * reached message max once a minute.
	 */
	pool_sethardlimit(&mclpool, nmbclusters, mclpool_warnmsg, 60);

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
137:
/*
 * sysctl handler for the mbuf subtree.  msize and mclbytes are
 * read-only; nmbclusters may be raised (never lowered) at run time
 * when pool pages are direct-mapped; mblowat/mcllowat may be set to
 * any non-negative value.
 */
int
sysctl_dombuf(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	int error, newval;

	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case MBUF_MSIZE:
		/* Read-only: size of an mbuf. */
		return (sysctl_rdint(oldp, oldlenp, newp, msize));
	case MBUF_MCLBYTES:
		/* Read-only: size of an mbuf cluster. */
		return (sysctl_rdint(oldp, oldlenp, newp, mclbytes));
	case MBUF_NMBCLUSTERS:
		/*
		 * If we have direct-mapped pool pages, we can adjust this
		 * number on the fly.  If not, we're limited by the size
		 * of mb_map, and cannot change this value.
		 *
		 * Note: we only allow the value to be increased, never
		 * decreased.
		 */
		if (mb_map == NULL) {
			newval = nmbclusters;
			error = sysctl_int(oldp, oldlenp, newp, newlen,
			    &newval);
			if (error != 0)
				return (error);
			if (newp != NULL) {
				/* Only accept increases; push the new limit
				 * down into the cluster pool. */
				if (newval >= nmbclusters) {
					nmbclusters = newval;
					pool_sethardlimit(&mclpool,
					    nmbclusters, mclpool_warnmsg, 60);
				} else
					error = EINVAL;
			}
			return (error);
		} else
			return (sysctl_rdint(oldp, oldlenp, newp, nmbclusters));
	case MBUF_MBLOWAT:
	case MBUF_MCLLOWAT:
		/* New value must be >= 0. */
		newval = (name[0] == MBUF_MBLOWAT) ? mblowat : mcllowat;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &newval);
		if (error != 0)
			return (error);
		if (newp != NULL) {
			if (newval >= 0) {
				if (name[0] == MBUF_MBLOWAT) {
					mblowat = newval;
					pool_setlowat(&mbpool, newval);
				} else {
					mcllowat = newval;
					pool_setlowat(&mclpool, newval);
				}
			} else
				error = EINVAL;
		}
		return (error);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
209:
210: void *
211: mclpool_alloc(sz, flags, mtype)
212: unsigned long sz;
213: int flags;
214: int mtype;
215: {
1.32 thorpej 216: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
1.28 thorpej 217:
1.39 thorpej 218: return ((void *)uvm_km_alloc_poolpage1(mb_map, uvmexp.mb_object,
219: waitok));
1.1 cgd 220: }
221:
1.28 thorpej 222: void
223: mclpool_release(v, sz, mtype)
224: void *v;
225: unsigned long sz;
226: int mtype;
1.1 cgd 227: {
228:
1.31 thorpej 229: uvm_km_free_poolpage1(mb_map, (vaddr_t)v);
1.1 cgd 230: }
231:
/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
	/*
	 * Temporarily define m_retry away so that the MGET expansion
	 * below cannot recurse back into this function on a second
	 * failure; it yields NULL instead.
	 */
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}
252:
/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	struct mbuf *m;

	m_reclaim(i);
	/*
	 * Temporarily define m_retryhdr away so that the MGETHDR
	 * expansion below cannot recurse back into this function on a
	 * second failure; it yields NULL instead.
	 */
#define m_retryhdr(i, t)	(struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}
272:
/*
 * Ask every protocol and every network interface to release
 * whatever mbuf memory it can spare.  "how" is the caller's
 * M_WAIT/M_DONTWAIT choice (currently unused here).
 */
void
m_reclaim(how)
	int how;
{
	struct domain *dp;
	struct protosw *pr;
	struct ifnet *ifp;
	int s = splimp();	/* block network interrupts during the walk */

	/* Run each protocol's drain routine, if it has one. */
	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw;
		     pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	/* Likewise for each interface. */
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list))
		if (ifp->if_drain)
			(*ifp->if_drain)(ifp);
	splx(s);
	mbstat.m_drain++;	/* statistics: count reclaim passes */
}
293:
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	/* Function wrapper around the MGET macro; NULL on failure. */
	MGET(m, nowait, type);
	return (m);
}
308:
/*
 * Allocate an mbuf with a packet header (function wrapper around
 * the MGETHDR macro); NULL on failure.
 */
struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}
318:
319: struct mbuf *
1.5 cgd 320: m_getclr(nowait, type)
321: int nowait, type;
1.1 cgd 322: {
1.27 matt 323: struct mbuf *m;
1.1 cgd 324:
1.5 cgd 325: MGET(m, nowait, type);
1.1 cgd 326: if (m == 0)
327: return (0);
1.30 perry 328: memset(mtod(m, caddr_t), 0, MLEN);
1.1 cgd 329: return (m);
330: }
331:
/*
 * Free a single mbuf; return the next mbuf in the chain as yielded
 * by MFREE (may be NULL).
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}
341:
1.9 mycroft 342: void
1.1 cgd 343: m_freem(m)
1.27 matt 344: struct mbuf *m;
1.1 cgd 345: {
1.27 matt 346: struct mbuf *n;
1.1 cgd 347:
348: if (m == NULL)
349: return;
1.45 itojun 350: if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
351: m_freem(m->m_pkthdr.aux);
352: m->m_pkthdr.aux = NULL;
353: }
1.1 cgd 354: do {
355: MFREE(m, n);
1.18 thorpej 356: m = n;
357: } while (m);
1.1 cgd 358: }
359:
360: /*
361: * Mbuffer utility routines.
362: */
363:
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.  On allocation failure the whole chain is freed
 * and NULL is returned.
 */
struct mbuf *
m_prepend(m, len, how)
	struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Move the packet header to the new leading mbuf. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);	/* put the new data at the tail end */
	m->m_len = len;
	return (m);
}
392:
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
int MCFail;		/* statistics: failed chain-copy attempts */

struct mbuf *
m_copym(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	/* Cluster (M_EXT) data is shared by reference, not duplicated. */
	return m_copym0(m, off0, len, wait, 0);	/* shallow copy on M_EXT */
}
408:
/*
 * Like m_copym(), but cluster (M_EXT) data is copied into freshly
 * allocated clusters instead of being shared by reference.
 */
struct mbuf *
m_dup(m, off0, len, wait)
	struct mbuf *m;
	int off0, wait;
	int len;
{
	return m_copym0(m, off0, len, wait, 1);	/* deep copy */
}
417:
418: static struct mbuf *
419: m_copym0(m, off0, len, wait, deep)
420: struct mbuf *m;
421: int off0, wait;
422: int len;
423: int deep; /* deep copy */
424: {
1.27 matt 425: struct mbuf *n, **np;
426: int off = off0;
1.1 cgd 427: struct mbuf *top;
428: int copyhdr = 0;
429:
430: if (off < 0 || len < 0)
1.43 thorpej 431: panic("m_copym: off %d, len %d", off, len);
1.1 cgd 432: if (off == 0 && m->m_flags & M_PKTHDR)
433: copyhdr = 1;
434: while (off > 0) {
435: if (m == 0)
1.43 thorpej 436: panic("m_copym: m == 0");
1.1 cgd 437: if (off < m->m_len)
438: break;
439: off -= m->m_len;
440: m = m->m_next;
441: }
442: np = ⊤
443: top = 0;
444: while (len > 0) {
445: if (m == 0) {
446: if (len != M_COPYALL)
1.43 thorpej 447: panic("m_copym: m == 0 and not COPYALL");
1.1 cgd 448: break;
449: }
450: MGET(n, wait, m->m_type);
451: *np = n;
452: if (n == 0)
453: goto nospace;
454: if (copyhdr) {
455: M_COPY_PKTHDR(n, m);
456: if (len == M_COPYALL)
457: n->m_pkthdr.len -= off0;
458: else
459: n->m_pkthdr.len = len;
460: copyhdr = 0;
461: }
1.9 mycroft 462: n->m_len = min(len, m->m_len - off);
1.1 cgd 463: if (m->m_flags & M_EXT) {
1.44 itojun 464: if (!deep) {
465: n->m_data = m->m_data + off;
466: n->m_ext = m->m_ext;
467: MCLADDREFERENCE(m, n);
468: } else {
1.48 ! itojun 469: /*
! 470: * XXX the code falsely assumes that, if mbufs
! 471: * are with M_EXT, the cluster region was
! 472: * allocated with MCLGET() and is with the size
! 473: * of MCLBYTES.
! 474: * this is not the case. for counter example,
! 475: * see MEXTMALLOC() and MEXTADD().
! 476: */
1.44 itojun 477: MCLGET(n, wait);
478: memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
479: (unsigned)n->m_len);
480: }
1.1 cgd 481: } else
1.30 perry 482: memcpy(mtod(n, caddr_t), mtod(m, caddr_t)+off,
1.1 cgd 483: (unsigned)n->m_len);
484: if (len != M_COPYALL)
485: len -= n->m_len;
486: off = 0;
487: m = m->m_next;
488: np = &n->m_next;
489: }
490: if (top == 0)
491: MCFail++;
492: return (top);
493: nospace:
494: m_freem(top);
495: MCFail++;
496: return (0);
497: }
498:
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Cluster (M_EXT) data is shared by reference.  Returns NULL (and
 * bumps MCFail) on allocation failure.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	/* Copy the first mbuf: header plus data (or cluster reference). */
	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
	} else {
		memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
	}

	/* Copy the remainder of the chain, one mbuf at a time. */
	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			MCLADDREFERENCE(m, n);
		} else {
			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}
551:
552: /*
1.1 cgd 553: * Copy data from an mbuf chain starting "off" bytes from the beginning,
554: * continuing for "len" bytes, into the indicated buffer.
555: */
1.14 christos 556: void
1.1 cgd 557: m_copydata(m, off, len, cp)
1.27 matt 558: struct mbuf *m;
559: int off;
560: int len;
1.1 cgd 561: caddr_t cp;
562: {
1.27 matt 563: unsigned count;
1.1 cgd 564:
565: if (off < 0 || len < 0)
566: panic("m_copydata");
567: while (off > 0) {
568: if (m == 0)
569: panic("m_copydata");
570: if (off < m->m_len)
571: break;
572: off -= m->m_len;
573: m = m->m_next;
574: }
575: while (len > 0) {
576: if (m == 0)
577: panic("m_copydata");
1.9 mycroft 578: count = min(m->m_len - off, len);
1.30 perry 579: memcpy(cp, mtod(m, caddr_t) + off, count);
1.1 cgd 580: len -= count;
581: cp += count;
582: off = 0;
583: m = m->m_next;
584: }
585: }
586:
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.  Data from n is compacted into the
 * tail of m where it fits; otherwise the chains are simply linked.
 */
void
m_cat(m, n)
	struct mbuf *m, *n;
{
	/* Find the last mbuf of the first chain. */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
612:
/*
 * Trim req_len bytes of data from the mbuf chain: from the head if
 * req_len is positive, from the tail if it is negative.  The packet
 * header length, if present, is adjusted to match.
 */
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* Entire mbuf consumed; keep it (empty)
				 * and move on. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		/* len holds any shortfall if the chain was shorter
		 * than req_len. */
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of any trailing mbufs. */
		while (m->m_next)
			(m = m->m_next) ->m_len = 0;
	}
}
686:
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
int MPFail;		/* statistics: failed m_pullup() attempts */

struct mbuf *
m_pullup(n, len)
	struct mbuf *n;
	int len;
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);	/* already contiguous */
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;	/* cannot fit in a single mbuf */
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Move the packet header to the new leader. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* Copy as much as needed (plus up to max_protohdr of
		 * slack), bounded by available space and n's data. */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* source mbuf drained; free it */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain ran out before len bytes were gathered. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}
755:
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	struct mbuf *m0;
	int len0, wait;
{
	struct mbuf *m, *n;
	unsigned len = len0, remain, len_save;

	/* Find the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;	/* bytes that go to the tail */
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		/* Remember the original length so it can be restored
		 * if the recursive split below fails. */
		len_save = m0->m_pkthdr.len;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				m0->m_pkthdr.len = len_save;
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split lands exactly on an mbuf boundary. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster; the tail just points further in. */
		n->m_ext = m->m_ext;
		MCLADDREFERENCE(m, n);
		n->m_data = m->m_data + len;
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.  Builds a new
 * packet of totlen bytes read from "buf" (wrapping at buf+totlen),
 * attaching rcvif = ifp.  "copy" is an optional copy routine to use
 * instead of memcpy (e.g. for device-specific access); NULL (and the
 * packet chain freed) is returned on allocation failure.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((const void *from, void *to, size_t len));
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		/*
		 * If 'off' is non-zero, packet is trailer-encapsulated,
		 * so we have to skip the type and length fields.
		 */
		cp += off + 2 * sizeof(u_int16_t);
		totlen -= 2 * sizeof(u_int16_t);
	}
	/* First mbuf carries the packet header. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs are plain data mbufs. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Worth a cluster; bail out cleanly if one
			 * cannot be attached. */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m_freem(top);
				return (0);
			}
			m->m_len = len = min(len, MCLBYTES);
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (size_t)len);
		else
			memcpy(mtod(m, caddr_t), cp, (size_t)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;	/* wrap around the source buffer */
	}
	return (top);
}
895:
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.  Extension mbufs are allocated M_DONTWAIT;
 * the copy stops short if one cannot be obtained.  The packet
 * header length, if present, is grown to cover the written bytes.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	int off;
	int len;
	caddr_t cp;
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	/* Walk (and if needed extend with zeroed mbufs) to the offset. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		memcpy(mtod(m, caddr_t) + off, cp, (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			/* Extend the chain to hold the remaining bytes. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
CVSweb <webmaster@jp.NetBSD.org>