version 1.181, 2018/01/22 15:05:27
version 1.181.2.9, 2018/10/20 06:58:45
Line 68 __KERNEL_RCSID(0, "$NetBSD$");

 #include "opt_mbuftrace.h"
 #include "opt_nmbclusters.h"
 #include "opt_ddb.h"
+#include "ether.h"
 #endif
|
|
 #include <sys/param.h>

Line 88/89 __KERNEL_RCSID(0, "$NetBSD$");

 #include <net/if.h>

 pool_cache_t mb_cache;	/* mbuf cache */
-pool_cache_t mcl_cache;	/* mbuf cluster cache */
+static pool_cache_t mcl_cache;	/* mbuf cluster cache */
|
|
 struct mbstat mbstat;
 int max_linkhdr;
 int max_protohdr;
 int max_hdr;
 int max_datalen;

+static void mb_drain(void *, int);
 static int mb_ctor(void *, void *, int);

 static void sysctl_kern_mbuf_setup(void);

 static struct sysctllog *mbuf_sysctllog;
|
|
-static struct mbuf *m_copym0(struct mbuf *, int, int, int, bool);
+static struct mbuf *m_copy_internal(struct mbuf *, int, int, int, bool);
-static struct mbuf *m_split0(struct mbuf *, int, int, bool);
+static struct mbuf *m_split_internal(struct mbuf *, int, int, bool);
-static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
+static int m_copyback_internal(struct mbuf **, int, int, const void *,
+    int, int);

-/* flags for m_copyback0 */
-#define	M_COPYBACK0_COPYBACK	0x0001	/* copyback from cp */
-#define	M_COPYBACK0_PRESERVE	0x0002	/* preserve original data */
-#define	M_COPYBACK0_COW		0x0004	/* do copy-on-write */
-#define	M_COPYBACK0_EXTEND	0x0008	/* extend chain */
+/* Flags for m_copyback_internal. */
+#define	CB_COPYBACK	0x0001	/* copyback from cp */
+#define	CB_PRESERVE	0x0002	/* preserve original data */
+#define	CB_COW		0x0004	/* do copy-on-write */
+#define	CB_EXTEND	0x0008	/* extend chain */
|
|
 static const char mclpool_warnmsg[] =
     "WARNING: mclpool limit reached; increase kern.mbuf.nmbclusters";

 	atomic_inc_uint(&(o)->m_ext.ext_refcnt);			\
 	(n)->m_ext_ref = (o)->m_ext_ref;				\
 	mowner_ref((n), (n)->m_flags);					\
-	MCLREFDEBUGN((n), __FILE__, __LINE__);				\
 } while (/* CONSTCOND */ 0)
|
|
 static int

 	    IPL_VM, NULL, NULL, NULL);
 	KASSERT(mcl_cache != NULL);

-	pool_cache_set_drain_hook(mb_cache, m_reclaim, NULL);
-	pool_cache_set_drain_hook(mcl_cache, m_reclaim, NULL);
+	pool_cache_set_drain_hook(mb_cache, mb_drain, NULL);
+	pool_cache_set_drain_hook(mcl_cache, mb_drain, NULL);

 	/*
 	 * Set an arbitrary default limit on the number of mbuf clusters.

 #endif
 }
|
|
|
+static void
+mb_drain(void *arg, int flags)
+{
+	struct domain *dp;
+	const struct protosw *pr;
+	struct ifnet *ifp;
+	int s;
+
+	KERNEL_LOCK(1, NULL);
+	s = splvm();
+	DOMAIN_FOREACH(dp) {
+		for (pr = dp->dom_protosw;
+		    pr < dp->dom_protoswNPROTOSW; pr++)
+			if (pr->pr_drain)
+				(*pr->pr_drain)();
+	}
+	/* XXX we cannot use psref in H/W interrupt */
+	if (!cpu_intr_p()) {
+		int bound = curlwp_bind();
+		IFNET_READER_FOREACH(ifp) {
+			struct psref psref;
+
+			if_acquire(ifp, &psref);
+
+			if (ifp->if_drain)
+				(*ifp->if_drain)(ifp);
+
+			if_release(ifp, &psref);
+		}
+		curlwp_bindx(bound);
+	}
+	splx(s);
+	mbstat.m_drain++;
+	KERNEL_UNLOCK_ONE(NULL);
+}
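The drain hook registered above gives the pool allocator a way to reclaim memory under pressure: mb_drain() asks every protocol and every interface to give back cached mbufs. As an illustrative sketch (not from this file; the names are hypothetical), a protocol's pr_drain callback typically just frees whatever chains it can spare:

static struct mbuf *example_cached_chain;	/* hypothetical cache */

static void
example_pr_drain(void)
{
	/* Called from mb_drain() via dp->dom_protosw; must not sleep. */
	if (example_cached_chain != NULL) {
		m_freem(example_cached_chain);
		example_cached_chain = NULL;
	}
}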
|
|
 /*
  * sysctl helper routine for the kern.mbuf subtree.
  * nmbclusters, mblowat and mcllowat need range

Line 256/294 sysctl_kern_mbuf(SYSCTLFN_ARGS)
|
 		newval = *(int*)rnode->sysctl_data;
 		break;
 	default:
-		return (EOPNOTSUPP);
+		return EOPNOTSUPP;
 	}

 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
 	if (error || newp == NULL)
-		return (error);
+		return error;
 	if (newval < 0)
-		return (EINVAL);
+		return EINVAL;

 	switch (node.sysctl_num) {
 	case MBUF_NMBCLUSTERS:
 		if (newval < nmbclusters)
-			return (EINVAL);
+			return EINVAL;
 		if (newval > nmbclusters_limit())
-			return (EINVAL);
+			return EINVAL;
 		nmbclusters = newval;
 		pool_cache_sethardlimit(mcl_cache, nmbclusters,
 		    mclpool_warnmsg, 60);

Line 285/323 sysctl_kern_mbuf(SYSCTLFN_ARGS)

 		break;
 	}

-	return (0);
+	return 0;
 }
|
|
 #ifdef MBUFTRACE
 static void
-mowner_conver_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
+mowner_convert_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
 {
 	struct mowner_counter *mc = v1;
 	struct mowner_user *mo_user = v2;

Line 310/348 mowner_convert_to_user(struct mowner *mo

 	CTASSERT(sizeof(mo_user->mo_descr) == sizeof(mo->mo_descr));
 	memcpy(mo_user->mo_name, mo->mo_name, sizeof(mo->mo_name));
 	memcpy(mo_user->mo_descr, mo->mo_descr, sizeof(mo->mo_descr));
-	percpu_foreach(mo->mo_counters, mowner_conver_to_user_cb, mo_user);
+	percpu_foreach(mo->mo_counters, mowner_convert_to_user_cb, mo_user);
 }
|
|
 static int

Line 321/359 sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)

 	int error = 0;

 	if (namelen != 0)
-		return (EINVAL);
+		return EINVAL;
 	if (newp != NULL)
-		return (EPERM);
+		return EPERM;

 	LIST_FOREACH(mo, &mowners, mo_link) {
 		struct mowner_user mo_user;

Line 346/384 sysctl_kern_mbuf_mowners(SYSCTLFN_ARGS)

 	if (error == 0)
 		*oldlenp = len;

-	return (error);
+	return error;
 }
 #endif /* MBUFTRACE */
|
|
|
+void
+mbstat_type_add(int type, int diff)
+{
+	struct mbstat_cpu *mb;
+	int s;
+
+	s = splvm();
+	mb = percpu_getref(mbstat_percpu);
+	mb->m_mtypes[type] += diff;
+	percpu_putref(mbstat_percpu);
+	splx(s);
+}
|
|
 static void
 mbstat_conver_to_user_cb(void *v1, void *v2, struct cpu_info *ci)
 {

Line 439/490 sysctl_kern_mbuf_setup(void)

 		       SYSCTL_DESCR("Information about mbuf owners"),
 		       sysctl_kern_mbuf_mowners, 0, NULL, 0,
 		       CTL_KERN, KERN_MBUF, MBUF_MOWNERS, CTL_EOL);
-#endif /* MBUFTRACE */
+#endif
 }
|
|
 static int

Line 452/503 mb_ctor(void *arg, void *object, int fla

 #else
 	m->m_paddr = M_PADDR_INVALID;
 #endif
-	return (0);
+	return 0;
 }

 /*

Line 472/523 m_add(struct mbuf *c, struct mbuf *m)

 	return c;
 }
|
|
-/*
- * Set the m_data pointer of a newly-allocated mbuf
- * to place an object of the specified size at the
- * end of the mbuf, longword aligned.
- */
-void
-m_align(struct mbuf *m, int len)
-{
-	int adjust;
-
-	KASSERT(len != M_COPYALL);
-
-	if (m->m_flags & M_EXT)
-		adjust = m->m_ext.ext_size - len;
-	else if (m->m_flags & M_PKTHDR)
-		adjust = MHLEN - len;
-	else
-		adjust = MLEN - len;
-	m->m_data += adjust &~ (sizeof(long)-1);
-}
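The removed comment describes the layout trick: m_data is advanced so that a len-byte object ends exactly at the end of the buffer, with the adjustment rounded down to a long boundary. A self-contained sketch of the same arithmetic (illustrative only; the function name is hypothetical):

/* Offset at which a 'len'-byte object should start inside a 'size'-byte
 * buffer so that it sits at the end, long-aligned, as m_align() did. */
static size_t
example_align_offset(size_t size, size_t len)
{
	size_t adjust = size - len;	/* caller guarantees len <= size */

	return adjust & ~(sizeof(long) - 1);
}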
|
|
|
-/*
- * Append the specified data to the indicated mbuf chain.
- * Extend the mbuf chain if the new data does not fit in
- * existing space.
- *
- * Return 1 if able to complete the job; otherwise 0.
- */
-int
-m_append(struct mbuf *m0, int len, const void *cpv)
-{
-	struct mbuf *m, *n;
-	int remainder, space;
-	const char *cp = cpv;
-
-	KASSERT(len != M_COPYALL);
-	for (m = m0; m->m_next != NULL; m = m->m_next)
-		continue;
-	remainder = len;
-	space = M_TRAILINGSPACE(m);
-	if (space > 0) {
-		/*
-		 * Copy into available space.
-		 */
-		if (space > remainder)
-			space = remainder;
-		memmove(mtod(m, char *) + m->m_len, cp, space);
-		m->m_len += space;
-		cp = cp + space, remainder -= space;
-	}
-	while (remainder > 0) {
-		/*
-		 * Allocate a new mbuf; could check space
-		 * and allocate a cluster instead.
-		 */
-		n = m_get(M_DONTWAIT, m->m_type);
-		if (n == NULL)
-			break;
-		n->m_len = min(MLEN, remainder);
-		memmove(mtod(n, void *), cp, n->m_len);
-		cp += n->m_len, remainder -= n->m_len;
-		m->m_next = n;
-		m = n;
-	}
-	if (m0->m_flags & M_PKTHDR)
-		m0->m_pkthdr.len += len - remainder;
-	return (remainder == 0);
-}
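For reference, m_append() as shown above copies into trailing space first and only then allocates plain mbufs, returning 1 on success and 0 on a partial append. A minimal usage sketch (kernel context assumed; the wrapper name is hypothetical):

/* Append a trailer to a packet; on failure the chain holds a partial copy. */
static int
example_add_trailer(struct mbuf *m0, const void *trailer, int len)
{
	if (m_append(m0, len, trailer) == 0)
		return ENOBUFS;	/* could not extend the chain */
	return 0;
}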
|
|
|
-void
-m_reclaim(void *arg, int flags)
-{
-	struct domain *dp;
-	const struct protosw *pr;
-	struct ifnet *ifp;
-	int s;
-
-	KERNEL_LOCK(1, NULL);
-	s = splvm();
-	DOMAIN_FOREACH(dp) {
-		for (pr = dp->dom_protosw;
-		    pr < dp->dom_protoswNPROTOSW; pr++)
-			if (pr->pr_drain)
-				(*pr->pr_drain)();
-	}
-	/* XXX we cannot use psref in H/W interrupt */
-	if (!cpu_intr_p()) {
-		int bound = curlwp_bind();
-		IFNET_READER_FOREACH(ifp) {
-			struct psref psref;
-
-			if_acquire(ifp, &psref);
-
-			if (ifp->if_drain)
-				(*ifp->if_drain)(ifp);
-
-			if_release(ifp, &psref);
-		}
-		curlwp_bindx(bound);
-	}
-	splx(s);
-	mbstat.m_drain++;
-	KERNEL_UNLOCK_ONE(NULL);
-}
-
-/*
- * Space allocation routines.
- * These are also available as macros
- * for critical paths.
- */
|
 struct mbuf *
-m_get(int nowait, int type)
+m_get(int how, int type)
 {
 	struct mbuf *m;

 	KASSERT(type != MT_FREE);

 	m = pool_cache_get(mb_cache,
-	    nowait == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : PR_NOWAIT);
+	    how == M_WAIT ? PR_WAITOK|PR_LIMITFAIL : PR_NOWAIT);
 	if (m == NULL)
 		return NULL;

 	mbstat_type_add(type, 1);

-	m_hdr_init(m, type, NULL, m->m_dat, 0);
+	mowner_init(m, type);
+	m->m_ext_ref = m;	/* default */
+	m->m_type = type;
+	m->m_len = 0;
+	m->m_next = NULL;
+	m->m_nextpkt = NULL;	/* default */
+	m->m_data = m->m_dat;
+	m->m_flags = 0;		/* default */

 	return m;
 }
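Note that with M_DONTWAIT the allocation can fail, so every caller must check for NULL. The common calling pattern, as a sketch (illustrative wrapper name; this mirrors what the old m_getclr() helper, removed in the next hunk, used to do):

/* Allocate a zeroed data mbuf, the old m_getclr() pattern. */
static struct mbuf *
example_get_zeroed(void)
{
	struct mbuf *m;

	m = m_get(M_DONTWAIT, MT_DATA);	/* NULL when the pool is exhausted */
	if (m == NULL)
		return NULL;
	memset(mtod(m, void *), 0, MLEN);
	m->m_len = MLEN;
	return m;
}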
|
|
 struct mbuf *
-m_gethdr(int nowait, int type)
+m_gethdr(int how, int type)
 {
 	struct mbuf *m;

-	m = m_get(nowait, type);
+	m = m_get(how, type);
 	if (m == NULL)
 		return NULL;

-	m_pkthdr_init(m);
+	m->m_data = m->m_pktdat;
+	m->m_flags = M_PKTHDR;
+
+	m_reset_rcvif(m);
+	m->m_pkthdr.len = 0;
+	m->m_pkthdr.csum_flags = 0;
+	m->m_pkthdr.csum_data = 0;
+	SLIST_INIT(&m->m_pkthdr.tags);
+
+	m->m_pkthdr.pattr_class = NULL;
+	m->m_pkthdr.pattr_af = AF_UNSPEC;
+	m->m_pkthdr.pattr_hdr = NULL;

-	return m;
-}
-
-struct mbuf *
-m_getclr(int nowait, int type)
-{
-	struct mbuf *m;
-
-	m = m_get(nowait, type);
-	if (m == NULL)
-		return NULL;
-	memset(mtod(m, void *), 0, MLEN);
 	return m;
 }
|
|
 void
-m_clget(struct mbuf *m, int nowait)
+m_clget(struct mbuf *m, int how)
 {
-	MCLGET(m, nowait);
-}
-
-#ifdef MBUFTRACE
-/*
- * Walk a chain of mbufs, claiming ownership of each mbuf in the chain.
- */
-void
-m_claimm(struct mbuf *m, struct mowner *mo)
-{
-
-	for (; m != NULL; m = m->m_next)
-		MCLAIM(m, mo);
-}
-#endif
+	m->m_ext_storage.ext_buf = (char *)pool_cache_get_paddr(mcl_cache,
+	    how == M_WAIT ? (PR_WAITOK|PR_LIMITFAIL) : PR_NOWAIT,
+	    &m->m_ext_storage.ext_paddr);
+
+	if (m->m_ext_storage.ext_buf == NULL)
+		return;
+
+	MCLINITREFERENCE(m);
+	m->m_data = m->m_ext.ext_buf;
+	m->m_flags = (m->m_flags & ~M_EXTCOPYFLAGS) |
+	    M_EXT|M_EXT_CLUSTER|M_EXT_RW;
+	m->m_ext.ext_size = MCLBYTES;
+	m->m_ext.ext_free = NULL;
+	m->m_ext.ext_arg = NULL;
+	/* ext_paddr initialized above */
+
+	mowner_ref(m, M_EXT|M_EXT_CLUSTER);
+}
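m_clget() reports failure only through the flag bits: if M_EXT is still clear afterwards, no cluster was attached and the mbuf's internal storage remains in use. The classic calling pattern, as a sketch (illustrative wrapper name):

/* Allocate a packet header mbuf backed by a full cluster. */
static struct mbuf *
example_get_cluster_pkt(void)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	m_clget(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {	/* cluster attach failed */
		m_freem(m);
		return NULL;
	}
	return m;	/* mtod(m, ...) now points into MCLBYTES of storage */
}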
|
|
|
 /*
- * Mbuffer utility routines.
- */
-
-/*
- * Lesser-used path for M_PREPEND:
- * allocate new mbuf to prepend to chain,
- * copy junk along.
+ * Utility function for M_PREPEND. Do *NOT* use it directly.
  */
 struct mbuf *
 m_prepend(struct mbuf *m, int len, int how)

Line 692/635 m_prepend(struct mbuf *m, int len, int h

 	return m;
 }
|
|
-/*
- * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
- * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
- * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
- */
-int MCFail;
-
 struct mbuf *
-m_copym(struct mbuf *m, int off0, int len, int wait)
+m_copym(struct mbuf *m, int off, int len, int wait)
 {
-	return m_copym0(m, off0, len, wait, false); /* shallow copy on M_EXT */
+	/* Shallow copy on M_EXT. */
+	return m_copy_internal(m, off, len, wait, false);
 }

 struct mbuf *
-m_dup(struct mbuf *m, int off0, int len, int wait)
+m_dup(struct mbuf *m, int off, int len, int wait)
 {
-	return m_copym0(m, off0, len, wait, true); /* deep copy */
+	/* Deep copy. */
+	return m_copy_internal(m, off, len, wait, true);
 }
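The only difference between the two wrappers is the deep flag: m_copym() may share external (cluster) storage with the original and is therefore cheap but yields read-only data, while m_dup() always duplicates the bytes. A sketch of the distinction (illustrative; error handling omitted):

struct mbuf *ref_copy, *deep_copy;

ref_copy  = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);	/* may share clusters */
deep_copy = m_dup(m0, 0, M_COPYALL, M_DONTWAIT);	/* private byte copy */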
|
|
 static inline int
 m_copylen(int len, int copylen)
 {
-	return (len == M_COPYALL) ? copylen : min(len, copylen);
+	return (len == M_COPYALL) ? copylen : uimin(len, copylen);
 }
|
|
 static struct mbuf *
-m_copym0(struct mbuf *m, int off0, int len, int wait, bool deep)
+m_copy_internal(struct mbuf *m, int off0, int len, int wait, bool deep)
 {
 	struct mbuf *n, **np;
 	int off = off0;

Line 728/664 m_copym0(struct mbuf *m, int off0, int l

 	int copyhdr = 0;

 	if (off < 0 || (len != M_COPYALL && len < 0))
-		panic("m_copym: off %d, len %d", off, len);
+		panic("%s: off %d, len %d", __func__, off, len);
 	if (off == 0 && m->m_flags & M_PKTHDR)
 		copyhdr = 1;
 	while (off > 0) {
 		if (m == NULL)
-			panic("m_copym: m == 0, off %d", off);
+			panic("%s: m == NULL, off %d", __func__, off);
 		if (off < m->m_len)
 			break;
 		off -= m->m_len;

Line 745/681 m_copym0(struct mbuf *m, int off0, int l

 	while (len == M_COPYALL || len > 0) {
 		if (m == NULL) {
 			if (len != M_COPYALL)
-				panic("m_copym: m == 0, len %d [!COPYALL]",
-				    len);
+				panic("%s: m == NULL, len %d [!COPYALL]",
+				    __func__, len);
 			break;
 		}

Line 779/715 m_copym0(struct mbuf *m, int off0, int l

 			n->m_len = 0;
 			n->m_len = M_TRAILINGSPACE(n);
 			n->m_len = m_copylen(len, n->m_len);
-			n->m_len = min(n->m_len, m->m_len - off);
+			n->m_len = uimin(n->m_len, m->m_len - off);
 			memcpy(mtod(n, void *), mtod(m, char *) + off,
 			    (unsigned)n->m_len);
 		}

Line 791/727 m_copym0(struct mbuf *m, int off0, int l

 		if (len != M_COPYALL)
 			len -= n->m_len;
 		off += n->m_len;
-#ifdef DIAGNOSTIC
-		if (off > m->m_len)
-			panic("m_copym0 overrun %d %d", off, m->m_len);
-#endif
+		KASSERT(off <= m->m_len);
 		if (off == m->m_len) {
 			m = m->m_next;
 			off = 0;

Line 802/737 m_copym0(struct mbuf *m, int off0, int l

 		np = &n->m_next;
 	}

-	if (top == NULL)
-		MCFail++;
-
 	return top;

 nospace:
 	m_freem(top);
-	MCFail++;
 	return NULL;
 }
|
|
Line 822/753 m_copypacket(struct mbuf *m, int how)

 {
 	struct mbuf *top, *n, *o;

+	if (__predict_false((m->m_flags & M_PKTHDR) == 0)) {
+		panic("%s: no header (m = %p)", __func__, m);
+	}
+
 	n = m_get(how, m->m_type);
 	top = n;
 	if (!n)

Line 861/796 m_copypacket(struct mbuf *m, int how)

 nospace:
 	m_freem(top);
-	MCFail++;
 	return NULL;
 }
|
|
-/*
- * Copy data from an mbuf chain starting "off" bytes from the beginning,
- * continuing for "len" bytes, into the indicated buffer.
- */
 void
-m_copydata(struct mbuf *m, int off, int len, void *vp)
+m_copydata(struct mbuf *m, int off, int len, void *cp)
 {
-	unsigned count;
-	void *cp = vp;
+	unsigned int count;
 	struct mbuf *m0 = m;
 	int len0 = len;
 	int off0 = off;
-	void *vp0 = vp;
+	void *cp0 = cp;

 	KASSERT(len != M_COPYALL);
 	if (off < 0 || len < 0)

Line 885/814 m_copydata(struct mbuf *m, int off, int

 	while (off > 0) {
 		if (m == NULL)
 			panic("m_copydata(%p,%d,%d,%p): m=NULL, off=%d (%d)",
-			    m0, len0, off0, vp0, off, off0 - off);
+			    m0, len0, off0, cp0, off, off0 - off);
 		if (off < m->m_len)
 			break;
 		off -= m->m_len;

Line 895/824 m_copydata(struct mbuf *m, int off, int

 		if (m == NULL)
 			panic("m_copydata(%p,%d,%d,%p): "
 			    "m=NULL, off=%d (%d), len=%d (%d)",
-			    m0, len0, off0, vp0,
+			    m0, len0, off0, cp0,
 			    off, off0 - off, len, len0 - len);
-		count = min(m->m_len - off, len);
+		count = uimin(m->m_len - off, len);
 		memcpy(cp, mtod(m, char *) + off, count);
 		len -= count;
 		cp = (char *)cp + count;
Line 1072/1001 m_ensure_contig(struct mbuf **m0, int le

 /*
  * m_pullup: same as m_ensure_contig(), but destroys mbuf chain on error.
  */
-int MPFail;
-
 struct mbuf *
 m_pullup(struct mbuf *n, int len)
 {

Line 1083/1010 m_pullup(struct mbuf *n, int len)

 	if (!m_ensure_contig(&m, len)) {
 		KASSERT(m != NULL);
 		m_freem(m);
-		MPFail++;
 		m = NULL;
 	}
 	return m;
Line 1094/1020 m_pullup(struct mbuf *n, int len)

  * the amount of empty space before the data in the new mbuf to be specified
  * (in the event that the caller expects to prepend later).
  */
-int MSFail;
-
 struct mbuf *
 m_copyup(struct mbuf *n, int len, int dstoff)
 {

Line 1103/1027 m_copyup(struct mbuf *n, int len, int ds

 	int count, space;

 	KASSERT(len != M_COPYALL);
-	if (len > (MHLEN - dstoff))
+	if (len > ((int)MHLEN - dstoff))
 		goto bad;
 	m = m_get(M_DONTWAIT, n->m_type);
 	if (m == NULL)

Line 1115/1039 m_copyup(struct mbuf *n, int len, int ds

 	m->m_data += dstoff;
 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
 	do {
-		count = min(min(max(len, max_protohdr), space), n->m_len);
+		count = uimin(uimin(uimax(len, max_protohdr), space), n->m_len);
 		memcpy(mtod(m, char *) + m->m_len, mtod(n, void *),
 		    (unsigned)count);
 		len -= count;

Line 1132/1056 m_copyup(struct mbuf *n, int len, int ds

 		goto bad;
 	}
 	m->m_next = n;
-	return (m);
+	return m;
 bad:
 	m_freem(n);
-	MSFail++;
-	return (NULL);
+	return NULL;
 }
|
|
-/*
- * Partition an mbuf chain in two pieces, returning the tail --
- * all but the first len0 bytes.  In case of failure, it returns NULL and
- * attempts to restore the chain to its original state.
- */
 struct mbuf *
-m_split(struct mbuf *m0, int len0, int wait)
+m_split(struct mbuf *m0, int len, int wait)
 {
-	return m_split0(m0, len0, wait, true);
+	return m_split_internal(m0, len, wait, true);
 }
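m_split() returns the tail of the chain beginning at byte len; the head keeps the leading bytes, and on failure the original chain is restored. A usage sketch (hdrlen is a hypothetical header length):

struct mbuf *payload;

payload = m_split(m0, hdrlen, M_DONTWAIT);
if (payload == NULL)
	return ENOBUFS;	/* m0 is left intact on failure */
/* m0 now holds the first hdrlen bytes; payload holds the remainder. */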
|
|
 static struct mbuf *
-m_split0(struct mbuf *m0, int len0, int wait, bool copyhdr)
+m_split_internal(struct mbuf *m0, int len0, int wait, bool copyhdr)
 {
 	struct mbuf *m, *n;
 	unsigned len = len0, remain, len_save;
Line 1259/1176 m_devget(char *buf, int totlen, int off0

 		m->m_len = MLEN;
 	}

-	len = min(totlen, epkt - cp);
+	len = uimin(totlen, epkt - cp);

 	if (len >= MINCLSIZE) {
 		MCLGET(m, M_DONTWAIT);

Line 1268/1185 m_devget(char *buf, int totlen, int off0

 		m_freem(top);
 		return NULL;
 	}
-	m->m_len = len = min(len, MCLBYTES);
+	m->m_len = len = uimin(len, MCLBYTES);
 	} else {
 		/*
 		 * Place initial small packet/header at end of mbuf.
Line 1316/1233 m_copyback(struct mbuf *m0, int off, int

 #if defined(DEBUG)
 	error =
 #endif
-	m_copyback0(&m0, off, len, cp,
-	    M_COPYBACK0_COPYBACK|M_COPYBACK0_EXTEND, M_DONTWAIT);
+	m_copyback_internal(&m0, off, len, cp, CB_COPYBACK|CB_EXTEND,
+	    M_DONTWAIT);

 #if defined(DEBUG)
 	if (error != 0 || (m0 != NULL && origm != m0))
Line 1334/1251 m_copyback_cow(struct mbuf *m0, int off,

 	KASSERT(len != M_COPYALL);
 	KDASSERT(off + len <= m_length(m0));

-	error = m_copyback0(&m0, off, len, cp,
-	    M_COPYBACK0_COPYBACK|M_COPYBACK0_COW, how);
+	error = m_copyback_internal(&m0, off, len, cp, CB_COPYBACK|CB_COW,
+	    how);
 	if (error) {
 		/*
 		 * no way to recover from partial success.

Line 1347/1264 m_copyback_cow(struct mbuf *m0, int off,

 	return m0;
 }
|
|
-/*
- * m_makewritable: ensure the specified range writable.
- */
 int
 m_makewritable(struct mbuf **mp, int off, int len, int how)
 {

Line 1358/1272 m_makewritable(struct mbuf **mp, int off

 	int origlen = m_length(*mp);
 #endif

-	error = m_copyback0(mp, off, len, NULL,
-	    M_COPYBACK0_PRESERVE|M_COPYBACK0_COW, how);
+	error = m_copyback_internal(mp, off, len, NULL, CB_PRESERVE|CB_COW,
+	    how);

 	if (error)
 		return error;

Line 1377/1290 m_makewritable(struct mbuf **mp, int off

 	return 0;
 }
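m_makewritable() drives the copyback engine with CB_PRESERVE|CB_COW: it copies any shared, read-only storage in the given range so the caller can modify it, possibly replacing mbufs in the chain, which is why it takes a pointer to the chain head. A sketch (hdrlen is hypothetical):

int error;

error = m_makewritable(&m0, 0, hdrlen, M_DONTWAIT);
if (error) {
	m_freem(m0);	/* chain head is still valid on error */
	return error;
}
/* The first hdrlen bytes of m0 are now safe to modify in place. */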
|
|
-/*
- * Copy the mbuf chain to a new mbuf chain that is as short as possible.
- * Return the new mbuf chain on success, NULL on failure.  On success,
- * free the old mbuf chain.
- */
-struct mbuf *
-m_defrag(struct mbuf *mold, int flags)
-{
-	struct mbuf *m0, *mn, *n;
-	size_t sz = mold->m_pkthdr.len;
-
-	KASSERT((mold->m_flags & M_PKTHDR) != 0);
-
-	m0 = m_gethdr(flags, MT_DATA);
-	if (m0 == NULL)
-		return NULL;
-	M_COPY_PKTHDR(m0, mold);
-	mn = m0;
-
-	do {
-		if (sz > MHLEN) {
-			MCLGET(mn, M_DONTWAIT);
-			if ((mn->m_flags & M_EXT) == 0) {
-				m_freem(m0);
-				return NULL;
-			}
-		}
-
-		mn->m_len = MIN(sz, MCLBYTES);
-
-		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
-		    mtod(mn, void *));
-
-		sz -= mn->m_len;
-
-		if (sz > 0) {
-			/* need more mbufs */
-			n = m_get(M_NOWAIT, MT_DATA);
-			if (n == NULL) {
-				m_freem(m0);
-				return NULL;
-			}
-
-			mn->m_next = n;
-			mn = n;
-		}
-	} while (sz > 0);
-
-	m_freem(mold);
-
-	return m0;
-}
-
-int
-m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
-    int how)
+static int
+m_copyback_internal(struct mbuf **mp0, int off, int len, const void *vp,
+    int flags, int how)
 {
 	int mlen;
 	struct mbuf *m, *n;
|
Line 1302 m_copyback0(struct mbuf **mp0, int off, |
|
|
|
KASSERT(mp0 != NULL); |
KASSERT(mp0 != NULL); |
KASSERT(*mp0 != NULL); |
KASSERT(*mp0 != NULL); |
KASSERT((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL); |
KASSERT((flags & CB_PRESERVE) == 0 || cp == NULL); |
KASSERT((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL); |
KASSERT((flags & CB_COPYBACK) == 0 || cp != NULL); |
|
|
if (len == M_COPYALL) |
if (len == M_COPYALL) |
len = m_length(*mp0) - off; |
len = m_length(*mp0) - off; |
|
|
/* |
/* |
* we don't bother to update "totlen" in the case of M_COPYBACK0_COW, |
* we don't bother to update "totlen" in the case of CB_COW, |
* assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive. |
* assuming that CB_EXTEND and CB_COW are exclusive. |
*/ |
*/ |
|
|
KASSERT((~flags & (M_COPYBACK0_EXTEND|M_COPYBACK0_COW)) != 0); |
KASSERT((~flags & (CB_EXTEND|CB_COW)) != 0); |
|
|
mp = mp0; |
mp = mp0; |
m = *mp; |
m = *mp; |
Line 1463 m_copyback0(struct mbuf **mp0, int off, |
|
Line 1323 m_copyback0(struct mbuf **mp0, int off, |
|
if (m->m_next == NULL) { |
if (m->m_next == NULL) { |
int tspace; |
int tspace; |
extend: |
extend: |
if ((flags & M_COPYBACK0_EXTEND) == 0) |
if ((flags & CB_EXTEND) == 0) |
goto out; |
goto out; |
|
|
/* |
/* |
|
|
} |
} |
tspace = M_TRAILINGSPACE(m); |
tspace = M_TRAILINGSPACE(m); |
if (tspace > 0) { |
if (tspace > 0) { |
tspace = min(tspace, off + len); |
tspace = uimin(tspace, off + len); |
KASSERT(tspace > 0); |
KASSERT(tspace > 0); |
memset(mtod(m, char *) + m->m_len, 0, |
memset(mtod(m, char *) + m->m_len, 0, |
min(off, tspace)); |
uimin(off, tspace)); |
m->m_len += tspace; |
m->m_len += tspace; |
off += mlen; |
off += mlen; |
totlen -= mlen; |
totlen -= mlen; |
|
|
if (n == NULL) { |
if (n == NULL) { |
goto out; |
goto out; |
} |
} |
n->m_len = min(M_TRAILINGSPACE(n), off + len); |
n->m_len = uimin(M_TRAILINGSPACE(n), off + len); |
memset(mtod(n, char *), 0, min(n->m_len, off)); |
memset(mtod(n, char *), 0, uimin(n->m_len, off)); |
m->m_next = n; |
m->m_next = n; |
} |
} |
mp = &m->m_next; |
mp = &m->m_next; |
|
|
while (len > 0) { |
while (len > 0) { |
mlen = m->m_len - off; |
mlen = m->m_len - off; |
if (mlen != 0 && M_READONLY(m)) { |
if (mlen != 0 && M_READONLY(m)) { |
char *datap; |
|
int eatlen; |
|
|
|
/* |
/* |
* this mbuf is read-only. |
* This mbuf is read-only. Allocate a new writable |
* allocate a new writable mbuf and try again. |
* mbuf and try again. |
*/ |
*/ |
|
char *datap; |
|
int eatlen; |
|
|
#if defined(DIAGNOSTIC) |
KASSERT((flags & CB_COW) != 0); |
if ((flags & M_COPYBACK0_COW) == 0) |
|
panic("m_copyback0: read-only"); |
|
#endif /* defined(DIAGNOSTIC) */ |
|
|
|
/* |
/* |
* if we're going to write into the middle of |
* if we're going to write into the middle of |
* a mbuf, split it first. |
* a mbuf, split it first. |
*/ |
*/ |
if (off > 0) { |
if (off > 0) { |
n = m_split0(m, off, how, false); |
n = m_split_internal(m, off, how, false); |
if (n == NULL) |
if (n == NULL) |
goto enobufs; |
goto enobufs; |
m->m_next = n; |
m->m_next = n; |
|
|
* free the region which has been overwritten. |
* free the region which has been overwritten. |
* copying data from old mbufs if requested. |
* copying data from old mbufs if requested. |
*/ |
*/ |
if (flags & M_COPYBACK0_PRESERVE) |
if (flags & CB_PRESERVE) |
datap = mtod(n, char *); |
datap = mtod(n, char *); |
else |
else |
datap = NULL; |
datap = NULL; |
eatlen = n->m_len; |
eatlen = n->m_len; |
while (m != NULL && M_READONLY(m) && |
while (m != NULL && M_READONLY(m) && |
n->m_type == m->m_type && eatlen > 0) { |
n->m_type == m->m_type && eatlen > 0) { |
mlen = min(eatlen, m->m_len); |
mlen = uimin(eatlen, m->m_len); |
if (datap) { |
if (datap) { |
m_copydata(m, 0, mlen, datap); |
m_copydata(m, 0, mlen, datap); |
datap += mlen; |
datap += mlen; |
|
|
*mp = m = n; |
*mp = m = n; |
continue; |
continue; |
} |
} |
mlen = min(mlen, len); |
mlen = uimin(mlen, len); |
if (flags & M_COPYBACK0_COPYBACK) { |
if (flags & CB_COPYBACK) { |
memcpy(mtod(m, char *) + off, cp, (unsigned)mlen); |
memcpy(mtod(m, char *) + off, cp, (unsigned)mlen); |
cp += mlen; |
cp += mlen; |
} |
} |
|
|
mp = &m->m_next; |
mp = &m->m_next; |
m = m->m_next; |
m = m->m_next; |
} |
} |
out: if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) { |
|
KASSERT((flags & M_COPYBACK0_EXTEND) != 0); |
out: |
|
if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) { |
|
KASSERT((flags & CB_EXTEND) != 0); |
m->m_pkthdr.len = totlen; |
m->m_pkthdr.len = totlen; |
} |
} |
|
|
|
|
return ENOBUFS; |
return ENOBUFS; |
} |
} |
|
|
|
+/*
+ * Compress the mbuf chain. Return the new mbuf chain on success, NULL on
+ * failure. The first mbuf is preserved, and on success the pointer returned
+ * is the same as the one passed.
+ */
+struct mbuf *
+m_defrag(struct mbuf *m, int how)
+{
+	struct mbuf *m0, *mn, *n;
+	int sz;
+
+	KASSERT((m->m_flags & M_PKTHDR) != 0);
+
+	if (m->m_next == NULL)
+		return m;
+
+	m0 = m_get(how, MT_DATA);
+	if (m0 == NULL)
+		return NULL;
+	mn = m0;
+
+	sz = m->m_pkthdr.len - m->m_len;
+	KASSERT(sz >= 0);
+
+	do {
+		if (sz > MLEN) {
+			MCLGET(mn, how);
+			if ((mn->m_flags & M_EXT) == 0) {
+				m_freem(m0);
+				return NULL;
+			}
+		}
+
+		mn->m_len = MIN(sz, MCLBYTES);
+
+		m_copydata(m, m->m_pkthdr.len - sz, mn->m_len,
+		    mtod(mn, void *));
+
+		sz -= mn->m_len;
+
+		if (sz > 0) {
+			/* need more mbufs */
+			n = m_get(how, MT_DATA);
+			if (n == NULL) {
+				m_freem(m0);
+				return NULL;
+			}
+
+			mn->m_next = n;
+			mn = n;
+		}
+	} while (sz > 0);
+
+	m_freem(m->m_next);
+	m->m_next = m0;
+
+	return m;
+}
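Unlike the removed variant, this m_defrag() keeps the first mbuf, so a caller's pointer (and the packet header) stays valid; only the tail of the chain is rebuilt into clusters, and on failure the original chain is untouched. A usage sketch (illustrative):

/* Compact a long chain, e.g. before DMA with a small segment limit. */
if (m_defrag(m0, M_DONTWAIT) == NULL) {
	m_freem(m0);	/* chain unchanged on failure; caller still owns it */
	return ENOBUFS;
}
/* m0 is the same pointer; its tail now uses at most a few cluster mbufs. */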
|
|
 void
-m_move_pkthdr(struct mbuf *to, struct mbuf *from)
+m_remove_pkthdr(struct mbuf *m)
 {
+	KASSERT(m->m_flags & M_PKTHDR);
+
+	m_tag_delete_chain(m, NULL);
+	m->m_flags &= ~M_PKTHDR;
+	memset(&m->m_pkthdr, 0, sizeof(m->m_pkthdr));
+}
+
+void
+m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
+{
+	KASSERT((to->m_flags & M_EXT) == 0);
+	KASSERT((to->m_flags & M_PKTHDR) == 0 || m_tag_first(to) == NULL);
+	KASSERT((from->m_flags & M_PKTHDR) != 0);
+
+	to->m_pkthdr = from->m_pkthdr;
+	to->m_flags = from->m_flags & M_COPYFLAGS;
+	to->m_data = to->m_pktdat;
+
+	SLIST_INIT(&to->m_pkthdr.tags);
+	m_tag_copy_chain(to, from);
+}
+
+void
+m_move_pkthdr(struct mbuf *to, struct mbuf *from)
+{
 	KASSERT((to->m_flags & M_EXT) == 0);
 	KASSERT((to->m_flags & M_PKTHDR) == 0 || m_tag_first(to) == NULL);
 	KASSERT((from->m_flags & M_PKTHDR) != 0);
Line 1656/1597 m_apply(struct mbuf *m, int off, int len

 	}
 	while (len > 0) {
 		KASSERT(m != NULL);
-		count = min(m->m_len - off, len);
+		count = uimin(m->m_len - off, len);

 		rval = (*f)(arg, mtod(m, char *) + off, count);
 		if (rval)

Line 1701/1642 m_getptr(struct mbuf *m, int loc, int *o

 	return NULL;
 }
|
|
-/*
- * m_ext_free: release a reference to the mbuf external storage.
- *
- * => free the mbuf m itself as well.
- */
-void
-m_ext_free(struct mbuf *m)
-{
-	const bool embedded = MEXT_ISEMBEDDED(m);
-	bool dofree = true;
-	u_int refcnt;
-
-	KASSERT((m->m_flags & M_EXT) != 0);
-	KASSERT(MEXT_ISEMBEDDED(m->m_ext_ref));
-	KASSERT((m->m_ext_ref->m_flags & M_EXT) != 0);
-	KASSERT((m->m_flags & M_EXT_CLUSTER) ==
-	    (m->m_ext_ref->m_flags & M_EXT_CLUSTER));
-
-	if (__predict_false(m->m_type == MT_FREE)) {
-		panic("mbuf %p already freed", m);
-	}
-
-	if (__predict_true(m->m_ext.ext_refcnt == 1)) {
-		refcnt = m->m_ext.ext_refcnt = 0;
-	} else {
-		refcnt = atomic_dec_uint_nv(&m->m_ext.ext_refcnt);
-	}
-
-	if (refcnt > 0) {
-		if (embedded) {
-			/*
-			 * other mbuf's m_ext_ref still points to us.
-			 */
-			dofree = false;
-		} else {
-			m->m_ext_ref = m;
-		}
-	} else {
-		/*
-		 * dropping the last reference
-		 */
-		if (!embedded) {
-			m->m_ext.ext_refcnt++; /* XXX */
-			m_ext_free(m->m_ext_ref);
-			m->m_ext_ref = m;
-		} else if ((m->m_flags & M_EXT_CLUSTER) != 0) {
-			pool_cache_put_paddr((struct pool_cache *)
-			    m->m_ext.ext_arg,
-			    m->m_ext.ext_buf, m->m_ext.ext_paddr);
-		} else if (m->m_ext.ext_free) {
-			(*m->m_ext.ext_free)(m,
-			    m->m_ext.ext_buf, m->m_ext.ext_size,
-			    m->m_ext.ext_arg);
-			/*
-			 * 'm' is already freed by the ext_free callback.
-			 */
-			dofree = false;
-		} else {
-			free(m->m_ext.ext_buf, m->m_ext.ext_type);
-		}
-	}
-
-	if (dofree) {
-		m->m_type = MT_FREE;
-		m->m_data = NULL;
-		pool_cache_put(mb_cache, m);
-	}
-}
|
|
|
 #if defined(DDB)
 void
 m_print(const struct mbuf *m, const char *modif, void (*pr)(const char *, ...))
 {
 	char ch;
 	bool opt_c = false;
+	bool opt_d = false;
+#if NETHER > 0
+	bool opt_v = false;
+	const struct mbuf *m0 = NULL;
+#endif
+	int no = 0;
 	char buf[512];

 	while ((ch = *(modif++)) != '\0') {

Line 1784/1661 m_print(const struct mbuf *m, const char

 		case 'c':
 			opt_c = true;
 			break;
+		case 'd':
+			opt_d = true;
+			break;
+#if NETHER > 0
+		case 'v':
+			opt_v = true;
+			m0 = m;
+			break;
+#endif
+		default:
+			break;
 		}
 	}

 nextchain:
-	(*pr)("MBUF %p\n", m);
+	(*pr)("MBUF(%d) %p\n", no, m);
 	snprintb(buf, sizeof(buf), M_FLAGS_BITS, (u_int)m->m_flags);
 	(*pr)("  data=%p, len=%d, type=%d, flags=%s\n",
 	    m->m_data, m->m_len, m->m_type, buf);
+	if (opt_d) {
+		int i;
+		unsigned char *p = m->m_data;
+
+		(*pr)("  data:");
+
+		for (i = 0; i < m->m_len; i++) {
+			if (i % 16 == 0)
+				(*pr)("\n");
+			(*pr)(" %02x", p[i]);
+		}
+
+		(*pr)("\n");
+	}
 	(*pr)("  owner=%p, next=%p, nextpkt=%p\n", m->m_owner, m->m_next,
 	    m->m_nextpkt);
 	(*pr)("  leadingspace=%u, trailingspace=%u, readonly=%u\n",

 	if (opt_c) {
 		m = m->m_next;
 		if (m != NULL) {
+			no++;
 			goto nextchain;
 		}
 	}
+#if NETHER > 0
+	if (opt_v && m0)
+		m_examine(m0, AF_ETHER, modif, pr);
+#endif
 }
 #endif /* defined(DDB) */
-
-void
-mbstat_type_add(int type, int diff)
-{
-	struct mbstat_cpu *mb;
-	int s;
-
-	s = splvm();
-	mb = percpu_getref(mbstat_percpu);
-	mb->m_mtypes[type] += diff;
-	percpu_putref(mbstat_percpu);
-	splx(s);
-}
|
|
 #if defined(MBUFTRACE)
 void

Line 1897/1792 mowner_ref(struct mbuf *m, int flags)

 	mc = percpu_getref(mo->mo_counters);
 	if ((flags & M_EXT) != 0)
 		mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
-	if ((flags & M_CLUSTER) != 0)
+	if ((flags & M_EXT_CLUSTER) != 0)
 		mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
 	percpu_putref(mo->mo_counters);
 	splx(s);

Line 1914/1809 mowner_revoke(struct mbuf *m, bool all,

 	mc = percpu_getref(mo->mo_counters);
 	if ((flags & M_EXT) != 0)
 		mc->mc_counter[MOWNER_COUNTER_EXT_RELEASES]++;
-	if ((flags & M_CLUSTER) != 0)
+	if ((flags & M_EXT_CLUSTER) != 0)
 		mc->mc_counter[MOWNER_COUNTER_CLUSTER_RELEASES]++;
 	if (all)
 		mc->mc_counter[MOWNER_COUNTER_RELEASES]++;

Line 1936/1831 mowner_claim(struct mbuf *m, struct mown

 	mc->mc_counter[MOWNER_COUNTER_CLAIMS]++;
 	if ((flags & M_EXT) != 0)
 		mc->mc_counter[MOWNER_COUNTER_EXT_CLAIMS]++;
-	if ((flags & M_CLUSTER) != 0)
+	if ((flags & M_EXT_CLUSTER) != 0)
 		mc->mc_counter[MOWNER_COUNTER_CLUSTER_CLAIMS]++;
 	percpu_putref(mo->mo_counters);
 	splx(s);

Line 1953/1848 m_claim(struct mbuf *m, struct mowner *m

 	mowner_revoke(m, true, m->m_flags);
 	mowner_claim(m, mo);
 }
+
+void
+m_claimm(struct mbuf *m, struct mowner *mo)
+{
+
+	for (; m != NULL; m = m->m_next)
+		m_claim(m, mo);
+}
 #endif /* defined(MBUFTRACE) */
|
|
|
+#ifdef DIAGNOSTIC
+/*
+ * Verify that the mbuf chain is not malformed. Used only for diagnostic.
+ * Panics on error.
+ */
+void
+m_verify_packet(struct mbuf *m)
+{
+	struct mbuf *n = m;
+	char *low, *high, *dat;
+	int totlen = 0, len;
+
+	if (__predict_false((m->m_flags & M_PKTHDR) == 0)) {
+		panic("%s: mbuf doesn't have M_PKTHDR", __func__);
+	}
+
+	while (n != NULL) {
+		if (__predict_false(n->m_type == MT_FREE)) {
+			panic("%s: mbuf already freed (n = %p)", __func__, n);
+		}
+#if 0
+		/*
+		 * This ought to be a rule of the mbuf API. Unfortunately,
+		 * many places don't respect that rule.
+		 */
+		if (__predict_false((n != m) && (n->m_flags & M_PKTHDR) != 0)) {
+			panic("%s: M_PKTHDR set on secondary mbuf", __func__);
+		}
+#endif
+		if (__predict_false(n->m_nextpkt != NULL)) {
+			panic("%s: m_nextpkt not null (m_nextpkt = %p)",
+			    __func__, n->m_nextpkt);
+		}
+
+		dat = n->m_data;
+		len = n->m_len;
+
+		if (n->m_flags & M_EXT) {
+			low = n->m_ext.ext_buf;
+			high = low + n->m_ext.ext_size;
+		} else if (n->m_flags & M_PKTHDR) {
+			low = n->m_pktdat;
+			high = low + MHLEN;
+		} else {
+			low = n->m_dat;
+			high = low + MLEN;
+		}
+		if (__predict_false(dat + len < dat)) {
+			panic("%s: incorrect length (len = %d)", __func__, len);
+		}
+		if (__predict_false((dat < low) || (dat + len > high))) {
+			panic("%s: m_data not in packet"
+			    "(dat = %p, len = %d, low = %p, high = %p)",
+			    __func__, dat, len, low, high);
+		}
+
+		totlen += len;
+		n = n->m_next;
+	}
+
+	if (__predict_false(totlen != m->m_pkthdr.len)) {
+		panic("%s: inconsistent mbuf length (%d != %d)", __func__,
+		    totlen, m->m_pkthdr.len);
+	}
+}
+#endif
|
|
|
+/*
+ * Release a reference to the mbuf external storage.
+ *
+ * => free the mbuf m itself as well.
+ */
+static void
+m_ext_free(struct mbuf *m)
+{
+	const bool embedded = MEXT_ISEMBEDDED(m);
+	bool dofree = true;
+	u_int refcnt;
+
+	KASSERT((m->m_flags & M_EXT) != 0);
+	KASSERT(MEXT_ISEMBEDDED(m->m_ext_ref));
+	KASSERT((m->m_ext_ref->m_flags & M_EXT) != 0);
+	KASSERT((m->m_flags & M_EXT_CLUSTER) ==
+	    (m->m_ext_ref->m_flags & M_EXT_CLUSTER));
+
+	if (__predict_false(m->m_type == MT_FREE)) {
+		panic("mbuf %p already freed", m);
+	}
+
+	if (__predict_true(m->m_ext.ext_refcnt == 1)) {
+		refcnt = m->m_ext.ext_refcnt = 0;
+	} else {
+		refcnt = atomic_dec_uint_nv(&m->m_ext.ext_refcnt);
+	}
+
+	if (refcnt > 0) {
+		if (embedded) {
+			/*
+			 * other mbuf's m_ext_ref still points to us.
+			 */
+			dofree = false;
+		} else {
+			m->m_ext_ref = m;
+		}
+	} else {
+		/*
+		 * dropping the last reference
+		 */
+		if (!embedded) {
+			m->m_ext.ext_refcnt++; /* XXX */
+			m_ext_free(m->m_ext_ref);
+			m->m_ext_ref = m;
+		} else if ((m->m_flags & M_EXT_CLUSTER) != 0) {
+			pool_cache_put_paddr(mcl_cache,
+			    m->m_ext.ext_buf, m->m_ext.ext_paddr);
+		} else if (m->m_ext.ext_free) {
+			(*m->m_ext.ext_free)(m,
+			    m->m_ext.ext_buf, m->m_ext.ext_size,
+			    m->m_ext.ext_arg);
+			/*
+			 * 'm' is already freed by the ext_free callback.
+			 */
+			dofree = false;
+		} else {
+			free(m->m_ext.ext_buf, 0);
+		}
+	}
+
+	if (dofree) {
+		m->m_type = MT_FREE;
+		m->m_data = NULL;
+		pool_cache_put(mb_cache, m);
+	}
+}
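The refcount protocol above pairs with MCLADDREFERENCE near the top of the file: each shallow copy bumps ext_refcnt, and only the drop of the last reference returns the cluster to mcl_cache (or runs the ext_free callback). A lifetime sketch (illustrative; error handling omitted):

struct mbuf *a, *b;

a = m_gethdr(M_DONTWAIT, MT_DATA);
m_clget(a, M_DONTWAIT);				/* ext_refcnt == 1 */
b = m_copym(a, 0, M_COPYALL, M_DONTWAIT);	/* shallow: ext_refcnt == 2 */
m_freem(a);	/* cluster survives, b still references it */
m_freem(b);	/* last reference: cluster goes back to mcl_cache */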
|
|
 /*
  * Free a single mbuf and associated external storage. Return the
  * successor, if any.