version 1.54.2.10, 2002/04/17 00:06:19 |
version 1.140.6.2, 2007/10/02 18:29:06 |
|
|
/* $NetBSD$ */ |
/* $NetBSD$ */ |
|
|
|
/*- |
|
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc. |
|
* All rights reserved. |
|
* |
|
* This code is derived from software contributed to The NetBSD Foundation |
|
* by Jason R. Thorpe of Wasabi Systems, Inc. |
|
* |
|
* Redistribution and use in source and binary forms, with or without |
|
* modification, are permitted provided that the following conditions |
|
* are met: |
|
* 1. Redistributions of source code must retain the above copyright |
|
* notice, this list of conditions and the following disclaimer. |
|
* 2. Redistributions in binary form must reproduce the above copyright |
|
* notice, this list of conditions and the following disclaimer in the |
|
* documentation and/or other materials provided with the distribution. |
|
* 3. All advertising materials mentioning features or use of this software |
|
* must display the following acknowledgement: |
|
* This product includes software developed by the NetBSD |
|
* Foundation, Inc. and its contributors. |
|
* 4. Neither the name of The NetBSD Foundation nor the names of its |
|
* contributors may be used to endorse or promote products derived |
|
* from this software without specific prior written permission. |
|
* |
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
* POSSIBILITY OF SUCH DAMAGE. |
|
*/ |
|
|
/* |
/* |
* Copyright (c) 1982, 1986, 1988, 1990, 1993 |
* Copyright (c) 1982, 1986, 1988, 1990, 1993 |
* The Regents of the University of California. All rights reserved. |
* The Regents of the University of California. All rights reserved. |
|
|
* 2. Redistributions in binary form must reproduce the above copyright |
* 2. Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* documentation and/or other materials provided with the distribution. |
* 3. All advertising materials mentioning features or use of this software |
* 3. Neither the name of the University nor the names of its contributors |
* must display the following acknowledgement: |
|
* This product includes software developed by the University of |
|
* California, Berkeley and its contributors. |
|
* 4. Neither the name of the University nor the names of its contributors |
|
* may be used to endorse or promote products derived from this software |
* may be used to endorse or promote products derived from this software |
* without specific prior written permission. |
* without specific prior written permission. |
* |
* |
|
|
#include <sys/cdefs.h> |
#include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD$"); |
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#include "opt_sock_counters.h" |
|
#include "opt_sosend_loan.h" |
|
#include "opt_mbuftrace.h" |
|
#include "opt_somaxkva.h" |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
#include <sys/systm.h> |
#include <sys/systm.h> |
#include <sys/lwp.h> |
|
#include <sys/proc.h> |
#include <sys/proc.h> |
#include <sys/file.h> |
#include <sys/file.h> |
|
#include <sys/filedesc.h> |
#include <sys/malloc.h> |
#include <sys/malloc.h> |
#include <sys/mbuf.h> |
#include <sys/mbuf.h> |
#include <sys/domain.h> |
#include <sys/domain.h> |
Line 53 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 90 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <sys/signalvar.h> |
#include <sys/signalvar.h> |
#include <sys/resourcevar.h> |
#include <sys/resourcevar.h> |
#include <sys/pool.h> |
#include <sys/pool.h> |
|
#include <sys/event.h> |
|
#include <sys/poll.h> |
|
#include <sys/kauth.h> |
|
#include <sys/mutex.h> |
|
#include <sys/condvar.h> |
|
|
struct pool socket_pool; |
#include <uvm/uvm.h> |
|
|
|
POOL_INIT(socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL, |
|
IPL_SOFTNET); |
|
|
|
MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options"); |
|
MALLOC_DEFINE(M_SONAME, "soname", "socket name"); |
|
|
|
extern const struct fileops socketops; |
|
|
extern int somaxconn; /* patchable (XXX sysctl) */ |
extern int somaxconn; /* patchable (XXX sysctl) */ |
int somaxconn = SOMAXCONN; |
int somaxconn = SOMAXCONN; |
|
|
|
#ifdef SOSEND_COUNTERS |
|
#include <sys/device.h> |
|
|
|
static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "loan big"); |
|
static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "copy big"); |
|
static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "copy small"); |
|
static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "kva limit"); |
|
|
|
#define SOSEND_COUNTER_INCR(ev) (ev)->ev_count++ |
|
|
|
EVCNT_ATTACH_STATIC(sosend_loan_big); |
|
EVCNT_ATTACH_STATIC(sosend_copy_big); |
|
EVCNT_ATTACH_STATIC(sosend_copy_small); |
|
EVCNT_ATTACH_STATIC(sosend_kvalimit); |
|
#else |
|
|
|
#define SOSEND_COUNTER_INCR(ev) /* nothing */ |
|
|
|
#endif /* SOSEND_COUNTERS */ |
|
|
|
/* Entry hooked into the kernel_map reclaim callback chain (see soinit). */
static struct callback_entry sokva_reclaimerentry;

/*
 * Minimum write size (bytes) for which sosend() attempts zero-copy page
 * loaning instead of copying; -1 disables loaning entirely.
 */
#ifdef SOSEND_NO_LOAN
int sock_loan_thresh = -1;
#else
int sock_loan_thresh = 4096;
#endif

/* Protects so_pendfree, socurkva and socurkva_cv. */
static kmutex_t so_pendfree_lock;
/* Singly-linked (via m_next) list of loaned mbufs awaiting deferred free. */
static struct mbuf *so_pendfree;

/* Upper bound on kernel VA consumed by socket page loans. */
#ifndef SOMAXKVA
#define	SOMAXKVA (16 * 1024 * 1024)
#endif
int somaxkva = SOMAXKVA;
/* Current amount of loan KVA reserved; guarded by so_pendfree_lock. */
static int socurkva;
/* Signalled when KVA is released or mbufs are queued for pendfree. */
static kcondvar_t socurkva_cv;

/* Maximum number of bytes loaned in a single sosend_loan() call. */
#define	SOCK_LOAN_CHUNK		65536

static size_t sodopendfree(void);
static size_t sodopendfreel(void);
|
|
|
/*
 * sokvareserve: reserve "len" bytes of loan KVA against the somaxkva
 * limit, sleeping (interruptibly) until space is available.
 *
 * => "so" is currently unused; kept for interface symmetry with
 *    sokvaalloc().
 * => returns len on success, 0 if the wait was interrupted.
 */
static vsize_t
sokvareserve(struct socket *so, vsize_t len)
{
    int error;

    mutex_enter(&so_pendfree_lock);
    while (socurkva + len > somaxkva) {
        size_t freed;

        /*
         * try to do pendfree.
         */
        freed = sodopendfreel();

        /*
         * if some kva was freed, try again.
         */
        if (freed)
            continue;

        SOSEND_COUNTER_INCR(&sosend_kvalimit);
        /* Interruptible wait; drops/reacquires so_pendfree_lock. */
        error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock);
        if (error) {
            /* Interrupted: report failure by returning 0. */
            len = 0;
            break;
        }
    }
    socurkva += len;	/* len == 0 on the interrupted path: no-op */
    mutex_exit(&so_pendfree_lock);
    return len;
}
|
|
|
static void |
|
sokvaunreserve(vsize_t len) |
|
{ |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
socurkva -= len; |
|
cv_broadcast(&socurkva_cv); |
|
mutex_exit(&so_pendfree_lock); |
|
} |
|
|
|
/* |
|
* sokvaalloc: allocate kva for loan. |
|
*/ |
|
|
|
vaddr_t |
|
sokvaalloc(vsize_t len, struct socket *so) |
|
{ |
|
vaddr_t lva; |
|
|
|
/* |
|
* reserve kva. |
|
*/ |
|
|
|
if (sokvareserve(so, len) == 0) |
|
return 0; |
|
|
|
/* |
|
* allocate kva. |
|
*/ |
|
|
|
lva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA); |
|
if (lva == 0) { |
|
sokvaunreserve(len); |
|
return (0); |
|
} |
|
|
|
return lva; |
|
} |
|
|
|
/* |
|
* sokvafree: free kva for loan. |
|
*/ |
|
|
|
void |
|
sokvafree(vaddr_t sva, vsize_t len) |
|
{ |
|
|
|
/* |
|
* free kva. |
|
*/ |
|
|
|
uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY); |
|
|
|
/* |
|
* unreserve kva. |
|
*/ |
|
|
|
sokvaunreserve(len); |
|
} |
|
|
|
/*
 * sodoloanfree: tear down one page loan mapping.
 *
 * => "pgs" is the vm_page array for the loaned region, or NULL, in
 *    which case the pages are looked up from the kernel pmap here.
 * => unmaps the region, un-loans the pages, and frees the loan KVA.
 * => must not be called from interrupt context (sokvafree -> uvm_km_free).
 */
static void
sodoloanfree(struct vm_page **pgs, void *buf, size_t size)
{
    vaddr_t va, sva, eva;
    vsize_t len;
    paddr_t pa;
    int i, npgs;

    /* Round the buffer out to whole pages. */
    eva = round_page((vaddr_t) buf + size);
    sva = trunc_page((vaddr_t) buf);
    len = eva - sva;
    npgs = len >> PAGE_SHIFT;

    if (__predict_false(pgs == NULL)) {
        /*
         * No page array supplied: rebuild it from the pmap.
         * NOTE(review): alloca() sizing is bounded by the loan chunk
         * size in practice -- confirm against SOCK_LOAN_CHUNK.
         */
        pgs = alloca(npgs * sizeof(*pgs));

        for (i = 0, va = sva; va < eva; i++, va += PAGE_SIZE) {
            if (pmap_extract(pmap_kernel(), va, &pa) == false)
                panic("sodoloanfree: va 0x%lx not mapped", va);
            pgs[i] = PHYS_TO_VM_PAGE(pa);
        }
    }

    /* Unmap first, then return the pages to their owner. */
    pmap_kremove(sva, len);
    pmap_update(pmap_kernel());
    uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE);
    sokvafree(sva, len);
}
|
|
|
static size_t |
|
sodopendfree() |
|
{ |
|
size_t rv; |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
rv = sodopendfreel(); |
|
mutex_exit(&so_pendfree_lock); |
|
|
|
return rv; |
|
} |
|
|
|
/* |
|
* sodopendfreel: free mbufs on "pendfree" list. |
|
* unlock and relock so_pendfree_lock when freeing mbufs. |
|
* |
|
* => called with so_pendfree_lock held. |
|
*/ |
|
|
|
static size_t |
|
sodopendfreel() |
|
{ |
|
struct mbuf *m, *next; |
|
size_t rv = 0; |
|
int s; |
|
|
|
KASSERT(mutex_owned(&so_pendfree_lock)); |
|
|
|
while (so_pendfree != NULL) { |
|
m = so_pendfree; |
|
so_pendfree = NULL; |
|
mutex_exit(&so_pendfree_lock); |
|
|
|
for (; m != NULL; m = next) { |
|
next = m->m_next; |
|
|
|
rv += m->m_ext.ext_size; |
|
sodoloanfree((m->m_flags & M_EXT_PAGES) ? |
|
m->m_ext.ext_pgs : NULL, m->m_ext.ext_buf, |
|
m->m_ext.ext_size); |
|
s = splvm(); |
|
pool_cache_put(&mbpool_cache, m); |
|
splx(s); |
|
} |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
} |
|
|
|
return (rv); |
|
} |
|
|
|
/*
 * soloanfree: ext-free callback for loaned mbuf storage (installed via
 * MEXTADD in sosend_loan()).
 *
 * => "m" is NULL when called from MEXTREMOVE; in that case the loan is
 *    torn down synchronously.
 * => otherwise the mbuf is queued on so_pendfree for deferred freeing.
 */
void
soloanfree(struct mbuf *m, void *buf, size_t size, void *arg)
{

    if (m == NULL) {

        /*
         * called from MEXTREMOVE.
         */
        sodoloanfree(NULL, buf, size);
        return;
    }

    /*
     * postpone freeing mbuf.
     *
     * we can't do it in interrupt context
     * because we need to put kva back to kernel_map.
     */
    mutex_enter(&so_pendfree_lock);
    m->m_next = so_pendfree;
    so_pendfree = m;
    /* Wake sokvareserve() waiters: there is now work to reclaim. */
    cv_broadcast(&socurkva_cv);
    mutex_exit(&so_pendfree_lock);
}
|
|
|
/*
 * sosend_loan: attempt zero-copy transmit by loaning the user's pages
 * into kernel VA and attaching them to "m" as external storage.
 *
 * => consumes up to min(iov_len, space, SOCK_LOAN_CHUNK) bytes of the
 *    current iovec and advances the uio accordingly.
 * => returns the number of bytes loaned, or 0 if loaning was not
 *    possible (kernel-space uio, no KVA, or uvm_loan failure); the
 *    caller falls back to copying in that case.
 */
static long
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space)
{
    struct iovec *iov = uio->uio_iov;
    vaddr_t sva, eva;
    vsize_t len;
    vaddr_t lva, va;
    int npgs, i, error;

    /* No point loaning kernel memory to the kernel. */
    if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace))
        return (0);

    /* Clamp to the current iovec and the per-call chunk limit. */
    if (iov->iov_len < (size_t) space)
        space = iov->iov_len;
    if (space > SOCK_LOAN_CHUNK)
        space = SOCK_LOAN_CHUNK;

    eva = round_page((vaddr_t) iov->iov_base + space);
    sva = trunc_page((vaddr_t) iov->iov_base);
    len = eva - sva;
    npgs = len >> PAGE_SHIFT;

    /* XXX KDASSERT */
    KASSERT(npgs <= M_EXT_MAXPAGES);

    /* Reserve and allocate kernel VA for the mapping. */
    lva = sokvaalloc(len, so);
    if (lva == 0)
        return 0;

    /* Loan the user's pages; fills m->m_ext.ext_pgs. */
    error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len,
        m->m_ext.ext_pgs, UVM_LOAN_TOPAGE);
    if (error) {
        sokvafree(lva, len);
        return (0);
    }

    /* Map the loaned pages read-only into the allocated VA. */
    for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE)
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]),
            VM_PROT_READ);
    pmap_update(pmap_kernel());

    /* Point into the mapping at the user buffer's page offset. */
    lva += (vaddr_t) iov->iov_base & PAGE_MASK;

    /* Attach as external storage; soloanfree() tears it down later. */
    MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so);
    m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;

    /* Advance the uio past the loaned bytes. */
    uio->uio_resid -= space;
    /* uio_offset not updated, not set/used for write(2) */
    uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space;
    uio->uio_iov->iov_len -= space;
    if (uio->uio_iov->iov_len == 0) {
        uio->uio_iov++;
        uio->uio_iovcnt--;
    }

    return (space);
}
|
|
|
static int |
|
sokva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) |
|
{ |
|
|
|
KASSERT(ce == &sokva_reclaimerentry); |
|
KASSERT(obj == NULL); |
|
|
|
sodopendfree(); |
|
if (!vm_map_starved_p(kernel_map)) { |
|
return CALLBACK_CHAIN_ABORT; |
|
} |
|
return CALLBACK_CHAIN_CONTINUE; |
|
} |
|
|
|
struct mbuf * |
|
getsombuf(struct socket *so) |
|
{ |
|
struct mbuf *m; |
|
|
|
m = m_get(M_WAIT, MT_SONAME); |
|
MCLAIM(m, so->so_mowner); |
|
return m; |
|
} |
|
|
|
struct mbuf * |
|
m_intopt(struct socket *so, int val) |
|
{ |
|
struct mbuf *m; |
|
|
|
m = getsombuf(so); |
|
m->m_len = sizeof(int); |
|
*mtod(m, int *) = val; |
|
return m; |
|
} |
|
|
void |
void |
soinit(void) |
soinit(void) |
{ |
{ |
|
|
pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0, |
mutex_init(&so_pendfree_lock, MUTEX_DRIVER, IPL_VM); |
"sockpl", NULL); |
cv_init(&socurkva_cv, "sokva"); |
|
|
|
/* Set the initial adjusted socket buffer size. */ |
|
if (sb_max_set(sb_max)) |
|
panic("bad initial sb_max value: %lu", sb_max); |
|
|
|
callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback, |
|
&sokva_reclaimerentry, NULL, sokva_reclaim_callback); |
} |
} |
|
|
/* |
/* |
|
|
*/ |
*/ |
/*ARGSUSED*/ |
/*ARGSUSED*/ |
int |
int |
socreate(int dom, struct socket **aso, int type, int proto) |
socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l) |
{ |
{ |
struct proc *p; |
const struct protosw *prp; |
struct protosw *prp; |
|
struct socket *so; |
struct socket *so; |
|
uid_t uid; |
int error, s; |
int error, s; |
|
|
p = curproc->l_proc; /* XXX */ |
error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET, |
|
KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type), |
|
KAUTH_ARG(proto)); |
|
if (error != 0) |
|
return error; |
|
|
if (proto) |
if (proto) |
prp = pffindproto(dom, proto, type); |
prp = pffindproto(dom, proto, type); |
else |
else |
prp = pffindtype(dom, type); |
prp = pffindtype(dom, type); |
if (prp == 0 || prp->pr_usrreq == 0) |
if (prp == NULL) { |
return (EPROTONOSUPPORT); |
/* no support for domain */ |
|
if (pffinddomain(dom) == 0) |
|
return EAFNOSUPPORT; |
|
/* no support for socket type */ |
|
if (proto == 0 && type != 0) |
|
return EPROTOTYPE; |
|
return EPROTONOSUPPORT; |
|
} |
|
if (prp->pr_usrreq == NULL) |
|
return EPROTONOSUPPORT; |
if (prp->pr_type != type) |
if (prp->pr_type != type) |
return (EPROTOTYPE); |
return EPROTOTYPE; |
s = splsoftnet(); |
s = splsoftnet(); |
so = pool_get(&socket_pool, PR_WAITOK); |
so = pool_get(&socket_pool, PR_WAITOK); |
memset((caddr_t)so, 0, sizeof(*so)); |
memset(so, 0, sizeof(*so)); |
TAILQ_INIT(&so->so_q0); |
TAILQ_INIT(&so->so_q0); |
TAILQ_INIT(&so->so_q); |
TAILQ_INIT(&so->so_q); |
so->so_type = type; |
so->so_type = type; |
so->so_proto = prp; |
so->so_proto = prp; |
so->so_send = sosend; |
so->so_send = sosend; |
so->so_receive = soreceive; |
so->so_receive = soreceive; |
if (p != 0) |
#ifdef MBUFTRACE |
so->so_uid = p->p_ucred->cr_uid; |
so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner; |
error = (*prp->pr_usrreq)(so, PRU_ATTACH, (struct mbuf *)0, |
so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner; |
(struct mbuf *)(long)proto, (struct mbuf *)0, p); |
so->so_mowner = &prp->pr_domain->dom_mowner; |
if (error) { |
#endif |
|
selinit(&so->so_rcv.sb_sel); |
|
selinit(&so->so_snd.sb_sel); |
|
uid = kauth_cred_geteuid(l->l_cred); |
|
so->so_uidinfo = uid_find(uid); |
|
error = (*prp->pr_usrreq)(so, PRU_ATTACH, NULL, |
|
(struct mbuf *)(long)proto, NULL, l); |
|
if (error != 0) { |
so->so_state |= SS_NOFDREF; |
so->so_state |= SS_NOFDREF; |
sofree(so); |
sofree(so); |
splx(s); |
splx(s); |
return (error); |
return error; |
} |
} |
splx(s); |
splx(s); |
*aso = so; |
*aso = so; |
return (0); |
return 0; |
|
} |
|
|
|
/*
 * fsocreate: create a socket and install it in the process's file
 * descriptor table.
 *
 * On success, write file descriptor to fdout and return zero.  On
 * failure, return non-zero; *fdout will be undefined.
 *
 * => if "sop" is non-NULL it also receives the new socket on success.
 */
int
fsocreate(int domain, struct socket **sop, int type, int protocol,
    struct lwp *l, int *fdout)
{
    struct filedesc *fdp;
    struct socket *so;
    struct file *fp;
    int fd, error;

    fdp = l->l_proc->p_fd;
    /* falloc() will use the descriptor for us */
    if ((error = falloc(l, &fp, &fd)) != 0)
        return (error);
    fp->f_flag = FREAD|FWRITE;
    fp->f_type = DTYPE_SOCKET;
    fp->f_ops = &socketops;
    error = socreate(domain, &so, type, protocol, l);
    if (error != 0) {
        /* Creation failed: back out the file/descriptor allocation. */
        FILE_UNUSE(fp, l);
        fdremove(fdp, fd);
        ffree(fp);
    } else {
        if (sop != NULL)
            *sop = so;
        fp->f_data = so;
        FILE_SET_MATURE(fp);
        FILE_UNUSE(fp, l);
        *fdout = fd;
    }
    return error;
}
} |
|
|
int |
int |
sobind(struct socket *so, struct mbuf *nam, struct proc *p) |
sobind(struct socket *so, struct mbuf *nam, struct lwp *l) |
{ |
{ |
int s, error; |
int s, error; |
|
|
s = splsoftnet(); |
s = splsoftnet(); |
error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, (struct mbuf *)0, |
error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, l); |
nam, (struct mbuf *)0, p); |
|
splx(s); |
splx(s); |
return (error); |
return error; |
} |
} |
|
|
int |
int |
Line 134 solisten(struct socket *so, int backlog) |
|
Line 588 solisten(struct socket *so, int backlog) |
|
int s, error; |
int s, error; |
|
|
s = splsoftnet(); |
s = splsoftnet(); |
error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, (struct mbuf *)0, |
error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
NULL, NULL, NULL); |
if (error) { |
if (error != 0) { |
splx(s); |
splx(s); |
return (error); |
return error; |
} |
} |
if (TAILQ_EMPTY(&so->so_q)) |
if (TAILQ_EMPTY(&so->so_q)) |
so->so_options |= SO_ACCEPTCONN; |
so->so_options |= SO_ACCEPTCONN; |
Line 146 solisten(struct socket *so, int backlog) |
|
Line 600 solisten(struct socket *so, int backlog) |
|
backlog = 0; |
backlog = 0; |
so->so_qlimit = min(backlog, somaxconn); |
so->so_qlimit = min(backlog, somaxconn); |
splx(s); |
splx(s); |
return (0); |
return 0; |
} |
} |
|
|
void |
void |
Line 164 sofree(struct socket *so) |
|
Line 618 sofree(struct socket *so) |
|
if (!soqremque(so, 0)) |
if (!soqremque(so, 0)) |
return; |
return; |
} |
} |
sbrelease(&so->so_snd); |
if (so->so_rcv.sb_hiwat) |
|
(void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0, |
|
RLIM_INFINITY); |
|
if (so->so_snd.sb_hiwat) |
|
(void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0, |
|
RLIM_INFINITY); |
|
sbrelease(&so->so_snd, so); |
sorflush(so); |
sorflush(so); |
|
seldestroy(&so->so_rcv.sb_sel); |
|
seldestroy(&so->so_snd.sb_sel); |
pool_put(&socket_pool, so); |
pool_put(&socket_pool, so); |
} |
} |
|
|
Line 205 soclose(struct socket *so) |
|
Line 667 soclose(struct socket *so) |
|
(so->so_state & SS_NBIO)) |
(so->so_state & SS_NBIO)) |
goto drop; |
goto drop; |
while (so->so_state & SS_ISCONNECTED) { |
while (so->so_state & SS_ISCONNECTED) { |
error = tsleep((caddr_t)&so->so_timeo, |
error = tsleep((void *)&so->so_timeo, |
PSOCK | PCATCH, netcls, |
PSOCK | PCATCH, netcls, |
so->so_linger * hz); |
so->so_linger * hz); |
if (error) |
if (error) |
Line 216 soclose(struct socket *so) |
|
Line 678 soclose(struct socket *so) |
|
drop: |
drop: |
if (so->so_pcb) { |
if (so->so_pcb) { |
int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, |
int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, |
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0, |
NULL, NULL, NULL, NULL); |
(struct proc *)0); |
|
if (error == 0) |
if (error == 0) |
error = error2; |
error = error2; |
} |
} |
Line 236 soclose(struct socket *so) |
|
Line 697 soclose(struct socket *so) |
|
int |
int |
soabort(struct socket *so) |
soabort(struct socket *so) |
{ |
{ |
|
int error; |
|
|
return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, (struct mbuf *)0, |
KASSERT(so->so_head == NULL); |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
error = (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, |
|
NULL, NULL, NULL); |
|
if (error) { |
|
sofree(so); |
|
} |
|
return error; |
} |
} |
|
|
int |
int |
Line 254 soaccept(struct socket *so, struct mbuf |
|
Line 721 soaccept(struct socket *so, struct mbuf |
|
if ((so->so_state & SS_ISDISCONNECTED) == 0 || |
if ((so->so_state & SS_ISDISCONNECTED) == 0 || |
(so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) |
(so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) |
error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, |
error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, |
(struct mbuf *)0, nam, (struct mbuf *)0, (struct proc *)0); |
NULL, nam, NULL, NULL); |
else |
else |
error = ECONNABORTED; |
error = ECONNABORTED; |
|
|
Line 263 soaccept(struct socket *so, struct mbuf |
|
Line 730 soaccept(struct socket *so, struct mbuf |
|
} |
} |
|
|
int |
int |
soconnect(struct socket *so, struct mbuf *nam) |
soconnect(struct socket *so, struct mbuf *nam, struct lwp *l) |
{ |
{ |
struct proc *p; |
|
int s, error; |
int s, error; |
|
|
p = curproc->l_proc; /* XXX */ |
|
if (so->so_options & SO_ACCEPTCONN) |
if (so->so_options & SO_ACCEPTCONN) |
return (EOPNOTSUPP); |
return (EOPNOTSUPP); |
s = splsoftnet(); |
s = splsoftnet(); |
Line 284 soconnect(struct socket *so, struct mbuf |
|
Line 749 soconnect(struct socket *so, struct mbuf |
|
error = EISCONN; |
error = EISCONN; |
else |
else |
error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT, |
error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT, |
(struct mbuf *)0, nam, (struct mbuf *)0, p); |
NULL, nam, NULL, l); |
splx(s); |
splx(s); |
return (error); |
return (error); |
} |
} |
Line 296 soconnect2(struct socket *so1, struct so |
|
Line 761 soconnect2(struct socket *so1, struct so |
|
|
|
s = splsoftnet(); |
s = splsoftnet(); |
error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, |
error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, |
(struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0, |
NULL, (struct mbuf *)so2, NULL, NULL); |
(struct proc *)0); |
|
splx(s); |
splx(s); |
return (error); |
return (error); |
} |
} |
Line 317 sodisconnect(struct socket *so) |
|
Line 781 sodisconnect(struct socket *so) |
|
goto bad; |
goto bad; |
} |
} |
error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, |
error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, |
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0, |
NULL, NULL, NULL, NULL); |
(struct proc *)0); |
|
bad: |
bad: |
splx(s); |
splx(s); |
|
sodopendfree(); |
return (error); |
return (error); |
} |
} |
|
|
Line 344 sodisconnect(struct socket *so) |
|
Line 808 sodisconnect(struct socket *so) |
|
*/ |
*/ |
int |
int |
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, |
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, |
struct mbuf *control, int flags) |
struct mbuf *control, int flags, struct lwp *l) |
{ |
{ |
struct proc *p; |
|
struct mbuf **mp, *m; |
struct mbuf **mp, *m; |
|
struct proc *p; |
long space, len, resid, clen, mlen; |
long space, len, resid, clen, mlen; |
int error, s, dontroute, atomic; |
int error, s, dontroute, atomic; |
|
|
p = curproc->l_proc; /* XXX */ |
p = l->l_proc; |
|
sodopendfree(); |
|
|
clen = 0; |
clen = 0; |
atomic = sosendallatonce(so) || top; |
atomic = sosendallatonce(so) || top; |
if (uio) |
if (uio) |
Line 372 sosend(struct socket *so, struct mbuf *a |
|
Line 838 sosend(struct socket *so, struct mbuf *a |
|
dontroute = |
dontroute = |
(flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && |
(flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && |
(so->so_proto->pr_flags & PR_ATOMIC); |
(so->so_proto->pr_flags & PR_ATOMIC); |
p->p_stats->p_ru.ru_msgsnd++; |
if (p) |
|
p->p_stats->p_ru.ru_msgsnd++; |
if (control) |
if (control) |
clen = control->m_len; |
clen = control->m_len; |
#define snderr(errno) { error = errno; splx(s); goto release; } |
#define snderr(errno) { error = errno; splx(s); goto release; } |
Line 404 sosend(struct socket *so, struct mbuf *a |
|
Line 871 sosend(struct socket *so, struct mbuf *a |
|
if ((atomic && resid > so->so_snd.sb_hiwat) || |
if ((atomic && resid > so->so_snd.sb_hiwat) || |
clen > so->so_snd.sb_hiwat) |
clen > so->so_snd.sb_hiwat) |
snderr(EMSGSIZE); |
snderr(EMSGSIZE); |
if (space < resid + clen && uio && |
if (space < resid + clen && |
(atomic || space < so->so_snd.sb_lowat || space < clen)) { |
(atomic || space < so->so_snd.sb_lowat || space < clen)) { |
if (so->so_state & SS_NBIO) |
if (so->so_state & SS_NBIO) |
snderr(EWOULDBLOCK); |
snderr(EWOULDBLOCK); |
Line 428 sosend(struct socket *so, struct mbuf *a |
|
Line 895 sosend(struct socket *so, struct mbuf *a |
|
top->m_flags |= M_EOR; |
top->m_flags |= M_EOR; |
} else do { |
} else do { |
if (top == 0) { |
if (top == 0) { |
MGETHDR(m, M_WAIT, MT_DATA); |
m = m_gethdr(M_WAIT, MT_DATA); |
mlen = MHLEN; |
mlen = MHLEN; |
m->m_pkthdr.len = 0; |
m->m_pkthdr.len = 0; |
m->m_pkthdr.rcvif = (struct ifnet *)0; |
m->m_pkthdr.rcvif = NULL; |
} else { |
} else { |
MGET(m, M_WAIT, MT_DATA); |
m = m_get(M_WAIT, MT_DATA); |
mlen = MLEN; |
mlen = MLEN; |
} |
} |
|
MCLAIM(m, so->so_snd.sb_mowner); |
|
if (sock_loan_thresh >= 0 && |
|
uio->uio_iov->iov_len >= sock_loan_thresh && |
|
space >= sock_loan_thresh && |
|
(len = sosend_loan(so, uio, m, |
|
space)) != 0) { |
|
SOSEND_COUNTER_INCR(&sosend_loan_big); |
|
space -= len; |
|
goto have_data; |
|
} |
if (resid >= MINCLSIZE && space >= MCLBYTES) { |
if (resid >= MINCLSIZE && space >= MCLBYTES) { |
MCLGET(m, M_WAIT); |
SOSEND_COUNTER_INCR(&sosend_copy_big); |
|
m_clget(m, M_WAIT); |
if ((m->m_flags & M_EXT) == 0) |
if ((m->m_flags & M_EXT) == 0) |
goto nopages; |
goto nopages; |
mlen = MCLBYTES; |
mlen = MCLBYTES; |
#ifdef MAPPED_MBUFS |
|
len = lmin(MCLBYTES, resid); |
|
#else |
|
if (atomic && top == 0) { |
if (atomic && top == 0) { |
len = lmin(MCLBYTES - max_hdr, |
len = lmin(MCLBYTES - max_hdr, |
resid); |
resid); |
m->m_data += max_hdr; |
m->m_data += max_hdr; |
} else |
} else |
len = lmin(MCLBYTES, resid); |
len = lmin(MCLBYTES, resid); |
#endif |
|
space -= len; |
space -= len; |
} else { |
} else { |
nopages: |
nopages: |
|
SOSEND_COUNTER_INCR(&sosend_copy_small); |
len = lmin(lmin(mlen, resid), space); |
len = lmin(lmin(mlen, resid), space); |
space -= len; |
space -= len; |
/* |
/* |
|
|
if (atomic && top == 0 && len < mlen) |
if (atomic && top == 0 && len < mlen) |
MH_ALIGN(m, len); |
MH_ALIGN(m, len); |
} |
} |
error = uiomove(mtod(m, caddr_t), (int)len, |
error = uiomove(mtod(m, void *), (int)len, |
uio); |
uio); |
|
have_data: |
resid = uio->uio_resid; |
resid = uio->uio_resid; |
m->m_len = len; |
m->m_len = len; |
*mp = m; |
*mp = m; |
|
|
break; |
break; |
} |
} |
} while (space > 0 && atomic); |
} while (space > 0 && atomic); |
|
|
s = splsoftnet(); |
s = splsoftnet(); |
|
|
if (so->so_state & SS_CANTSENDMORE) |
if (so->so_state & SS_CANTSENDMORE) |
|
|
so->so_state |= SS_MORETOCOME; |
so->so_state |= SS_MORETOCOME; |
error = (*so->so_proto->pr_usrreq)(so, |
error = (*so->so_proto->pr_usrreq)(so, |
(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, |
(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, |
top, addr, control, p); |
top, addr, control, curlwp); /* XXX */ |
if (dontroute) |
if (dontroute) |
so->so_options &= ~SO_DONTROUTE; |
so->so_options &= ~SO_DONTROUTE; |
if (resid > 0) |
if (resid > 0) |
|
|
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, |
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, |
struct mbuf **mp0, struct mbuf **controlp, int *flagsp) |
struct mbuf **mp0, struct mbuf **controlp, int *flagsp) |
{ |
{ |
|
struct lwp *l = curlwp; |
struct mbuf *m, **mp; |
struct mbuf *m, **mp; |
int flags, len, error, s, offset, moff, type, orig_resid; |
int flags, len, error, s, offset, moff, type, orig_resid; |
struct protosw *pr; |
const struct protosw *pr; |
struct mbuf *nextrecord; |
struct mbuf *nextrecord; |
|
int mbuf_removed = 0; |
|
|
pr = so->so_proto; |
pr = so->so_proto; |
mp = mp0; |
mp = mp0; |
type = 0; |
type = 0; |
orig_resid = uio->uio_resid; |
orig_resid = uio->uio_resid; |
|
|
if (paddr) |
if (paddr) |
*paddr = 0; |
*paddr = 0; |
if (controlp) |
if (controlp) |
Line 553 soreceive(struct socket *so, struct mbuf |
|
Line 1032 soreceive(struct socket *so, struct mbuf |
|
flags = *flagsp &~ MSG_EOR; |
flags = *flagsp &~ MSG_EOR; |
else |
else |
flags = 0; |
flags = 0; |
|
|
|
if ((flags & MSG_DONTWAIT) == 0) |
|
sodopendfree(); |
|
|
if (flags & MSG_OOB) { |
if (flags & MSG_OOB) { |
m = m_get(M_WAIT, MT_DATA); |
m = m_get(M_WAIT, MT_DATA); |
error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, |
error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, |
(struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0, |
(struct mbuf *)(long)(flags & MSG_PEEK), NULL, l); |
(struct proc *)0); |
|
if (error) |
if (error) |
goto bad; |
goto bad; |
do { |
do { |
error = uiomove(mtod(m, caddr_t), |
error = uiomove(mtod(m, void *), |
(int) min(uio->uio_resid, m->m_len), uio); |
(int) min(uio->uio_resid, m->m_len), uio); |
m = m_free(m); |
m = m_free(m); |
} while (uio->uio_resid && error == 0 && m); |
} while (uio->uio_resid && error == 0 && m); |
Line 571 soreceive(struct socket *so, struct mbuf |
|
Line 1053 soreceive(struct socket *so, struct mbuf |
|
return (error); |
return (error); |
} |
} |
if (mp) |
if (mp) |
*mp = (struct mbuf *)0; |
*mp = NULL; |
if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) |
if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l); |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
|
|
|
restart: |
restart: |
if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) |
if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) |
Line 632 soreceive(struct socket *so, struct mbuf |
|
Line 1113 soreceive(struct socket *so, struct mbuf |
|
error = EWOULDBLOCK; |
error = EWOULDBLOCK; |
goto release; |
goto release; |
} |
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
splx(s); |
splx(s); |
Line 640 soreceive(struct socket *so, struct mbuf |
|
Line 1123 soreceive(struct socket *so, struct mbuf |
|
goto restart; |
goto restart; |
} |
} |
dontblock: |
dontblock: |
#ifdef notyet /* XXXX */ |
/* |
if (uio->uio_procp) |
* On entry here, m points to the first record of the socket buffer. |
uio->uio_procp->p_stats->p_ru.ru_msgrcv++; |
* While we process the initial mbufs containing address and control |
#endif |
* info, we save a copy of m->m_nextpkt into nextrecord. |
|
*/ |
|
if (l) |
|
l->l_proc->p_stats->p_ru.ru_msgrcv++; |
|
KASSERT(m == so->so_rcv.sb_mb); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); |
nextrecord = m->m_nextpkt; |
nextrecord = m->m_nextpkt; |
if (pr->pr_flags & PR_ADDR) { |
if (pr->pr_flags & PR_ADDR) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
Line 657 soreceive(struct socket *so, struct mbuf |
|
Line 1146 soreceive(struct socket *so, struct mbuf |
|
m = m->m_next; |
m = m->m_next; |
} else { |
} else { |
sbfree(&so->so_rcv, m); |
sbfree(&so->so_rcv, m); |
|
mbuf_removed = 1; |
if (paddr) { |
if (paddr) { |
*paddr = m; |
*paddr = m; |
so->so_rcv.sb_mb = m->m_next; |
so->so_rcv.sb_mb = m->m_next; |
Line 675 soreceive(struct socket *so, struct mbuf |
|
Line 1165 soreceive(struct socket *so, struct mbuf |
|
m = m->m_next; |
m = m->m_next; |
} else { |
} else { |
sbfree(&so->so_rcv, m); |
sbfree(&so->so_rcv, m); |
|
mbuf_removed = 1; |
if (controlp) { |
if (controlp) { |
if (pr->pr_domain->dom_externalize && |
struct domain *dom = pr->pr_domain; |
|
if (dom->dom_externalize && l && |
mtod(m, struct cmsghdr *)->cmsg_type == |
mtod(m, struct cmsghdr *)->cmsg_type == |
SCM_RIGHTS) |
SCM_RIGHTS) |
error = (*pr->pr_domain->dom_externalize)(m); |
error = (*dom->dom_externalize)(m, l); |
*controlp = m; |
*controlp = m; |
so->so_rcv.sb_mb = m->m_next; |
so->so_rcv.sb_mb = m->m_next; |
m->m_next = 0; |
m->m_next = 0; |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} else { |
} else { |
|
/* |
|
* Dispose of any SCM_RIGHTS message that went |
|
* through the read path rather than recv. |
|
*/ |
|
if (pr->pr_domain->dom_dispose && |
|
mtod(m, struct cmsghdr *)->cmsg_type == SCM_RIGHTS) |
|
(*pr->pr_domain->dom_dispose)(m); |
MFREE(m, so->so_rcv.sb_mb); |
MFREE(m, so->so_rcv.sb_mb); |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} |
} |
Line 694 soreceive(struct socket *so, struct mbuf |
|
Line 1193 soreceive(struct socket *so, struct mbuf |
|
controlp = &(*controlp)->m_next; |
controlp = &(*controlp)->m_next; |
} |
} |
} |
} |
|
|
|
/* |
|
* If m is non-NULL, we have some data to read. From now on, |
|
* make sure to keep sb_lastrecord consistent when working on |
|
* the last packet on the chain (nextrecord == NULL) and we |
|
* change m->m_nextpkt. |
|
*/ |
if (m) { |
if (m) { |
if ((flags & MSG_PEEK) == 0) |
if ((flags & MSG_PEEK) == 0) { |
m->m_nextpkt = nextrecord; |
m->m_nextpkt = nextrecord; |
|
/* |
|
* If nextrecord == NULL (this is a single chain), |
|
* then sb_lastrecord may not be valid here if m |
|
* was changed earlier. |
|
*/ |
|
if (nextrecord == NULL) { |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
so->so_rcv.sb_lastrecord = m; |
|
} |
|
} |
type = m->m_type; |
type = m->m_type; |
if (type == MT_OOBDATA) |
if (type == MT_OOBDATA) |
flags |= MSG_OOB; |
flags |= MSG_OOB; |
|
} else { |
|
if ((flags & MSG_PEEK) == 0) { |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
so->so_rcv.sb_mb = nextrecord; |
|
SB_EMPTY_FIXUP(&so->so_rcv); |
|
} |
} |
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); |
|
|
moff = 0; |
moff = 0; |
offset = 0; |
offset = 0; |
while (m && uio->uio_resid > 0 && error == 0) { |
while (m && uio->uio_resid > 0 && error == 0) { |
Line 728 soreceive(struct socket *so, struct mbuf |
|
Line 1253 soreceive(struct socket *so, struct mbuf |
|
* block interrupts again. |
* block interrupts again. |
*/ |
*/ |
if (mp == 0) { |
if (mp == 0) { |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); |
splx(s); |
splx(s); |
error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); |
error = uiomove(mtod(m, char *) + moff, (int)len, uio); |
s = splsoftnet(); |
s = splsoftnet(); |
if (error) |
if (error) { |
|
/* |
|
* If any part of the record has been removed |
|
* (such as the MT_SONAME mbuf, which will |
|
* happen when PR_ADDR, and thus also |
|
* PR_ATOMIC, is set), then drop the entire |
|
* record to maintain the atomicity of the |
|
* receive operation. |
|
* |
|
* This avoids a later panic("receive 1a") |
|
* when compiled with DIAGNOSTIC. |
|
*/ |
|
if (m && mbuf_removed |
|
&& (pr->pr_flags & PR_ATOMIC)) |
|
(void) sbdroprecord(&so->so_rcv); |
|
|
goto release; |
goto release; |
|
} |
} else |
} else |
uio->uio_resid -= len; |
uio->uio_resid -= len; |
if (len == m->m_len - moff) { |
if (len == m->m_len - moff) { |
Line 748 soreceive(struct socket *so, struct mbuf |
|
Line 1291 soreceive(struct socket *so, struct mbuf |
|
*mp = m; |
*mp = m; |
mp = &m->m_next; |
mp = &m->m_next; |
so->so_rcv.sb_mb = m = m->m_next; |
so->so_rcv.sb_mb = m = m->m_next; |
*mp = (struct mbuf *)0; |
*mp = NULL; |
} else { |
} else { |
MFREE(m, so->so_rcv.sb_mb); |
MFREE(m, so->so_rcv.sb_mb); |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} |
} |
if (m) |
/* |
|
* If m != NULL, we also know that |
|
* so->so_rcv.sb_mb != NULL. |
|
*/ |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
if (m) { |
m->m_nextpkt = nextrecord; |
m->m_nextpkt = nextrecord; |
|
if (nextrecord == NULL) |
|
so->so_rcv.sb_lastrecord = m; |
|
} else { |
|
so->so_rcv.sb_mb = nextrecord; |
|
SB_EMPTY_FIXUP(&so->so_rcv); |
|
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); |
} |
} |
} else { |
} else { |
if (flags & MSG_PEEK) |
if (flags & MSG_PEEK) |
Line 793 soreceive(struct socket *so, struct mbuf |
|
Line 1349 soreceive(struct socket *so, struct mbuf |
|
!sosendallatonce(so) && !nextrecord) { |
!sosendallatonce(so) && !nextrecord) { |
if (so->so_error || so->so_state & SS_CANTRCVMORE) |
if (so->so_error || so->so_state & SS_CANTRCVMORE) |
break; |
break; |
|
/* |
|
* If we are peeking and the socket receive buffer is |
|
* full, stop since we can't get more data to peek at. |
|
*/ |
|
if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0) |
|
break; |
|
/* |
|
* If we've drained the socket buffer, tell the |
|
* protocol in case it needs to do something to |
|
* get it filled again. |
|
*/ |
|
if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) |
|
(*pr->pr_usrreq)(so, PRU_RCVD, |
|
NULL, (struct mbuf *)(long)flags, NULL, l); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); |
error = sbwait(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
if (error) { |
if (error) { |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
Line 810 soreceive(struct socket *so, struct mbuf |
|
Line 1382 soreceive(struct socket *so, struct mbuf |
|
(void) sbdroprecord(&so->so_rcv); |
(void) sbdroprecord(&so->so_rcv); |
} |
} |
if ((flags & MSG_PEEK) == 0) { |
if ((flags & MSG_PEEK) == 0) { |
if (m == 0) |
if (m == 0) { |
|
/* |
|
* First part is an inline SB_EMPTY_FIXUP(). Second |
|
* part makes sure sb_lastrecord is up-to-date if |
|
* there is still data in the socket buffer. |
|
*/ |
so->so_rcv.sb_mb = nextrecord; |
so->so_rcv.sb_mb = nextrecord; |
|
if (so->so_rcv.sb_mb == NULL) { |
|
so->so_rcv.sb_mbtail = NULL; |
|
so->so_rcv.sb_lastrecord = NULL; |
|
} else if (nextrecord->m_nextpkt == NULL) |
|
so->so_rcv.sb_lastrecord = nextrecord; |
|
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); |
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) |
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(*pr->pr_usrreq)(so, PRU_RCVD, NULL, |
(struct mbuf *)(long)flags, (struct mbuf *)0, |
(struct mbuf *)(long)flags, NULL, l); |
(struct proc *)0); |
|
} |
} |
if (orig_resid == uio->uio_resid && orig_resid && |
if (orig_resid == uio->uio_resid && orig_resid && |
(flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { |
(flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { |
Line 823 soreceive(struct socket *so, struct mbuf |
|
Line 1407 soreceive(struct socket *so, struct mbuf |
|
splx(s); |
splx(s); |
goto restart; |
goto restart; |
} |
} |
|
|
if (flagsp) |
if (flagsp) |
*flagsp |= flags; |
*flagsp |= flags; |
release: |
release: |
Line 835 soreceive(struct socket *so, struct mbuf |
|
Line 1419 soreceive(struct socket *so, struct mbuf |
|
int |
int |
soshutdown(struct socket *so, int how) |
soshutdown(struct socket *so, int how) |
{ |
{ |
struct protosw *pr; |
const struct protosw *pr; |
|
|
pr = so->so_proto; |
pr = so->so_proto; |
if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) |
if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) |
Line 844 soshutdown(struct socket *so, int how) |
|
Line 1428 soshutdown(struct socket *so, int how) |
|
if (how == SHUT_RD || how == SHUT_RDWR) |
if (how == SHUT_RD || how == SHUT_RDWR) |
sorflush(so); |
sorflush(so); |
if (how == SHUT_WR || how == SHUT_RDWR) |
if (how == SHUT_WR || how == SHUT_RDWR) |
return (*pr->pr_usrreq)(so, PRU_SHUTDOWN, (struct mbuf *)0, |
return (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
NULL, NULL, NULL); |
return (0); |
return (0); |
} |
} |
|
|
|
|
sorflush(struct socket *so) |
sorflush(struct socket *so) |
{ |
{ |
struct sockbuf *sb, asb; |
struct sockbuf *sb, asb; |
struct protosw *pr; |
const struct protosw *pr; |
int s; |
int s; |
|
|
sb = &so->so_rcv; |
sb = &so->so_rcv; |
Line 864 sorflush(struct socket *so) |
|
Line 1448 sorflush(struct socket *so) |
|
socantrcvmore(so); |
socantrcvmore(so); |
sbunlock(sb); |
sbunlock(sb); |
asb = *sb; |
asb = *sb; |
memset((caddr_t)sb, 0, sizeof(*sb)); |
/* |
|
* Clear most of the sockbuf structure, but leave some of the |
|
* fields valid. |
|
*/ |
|
memset(&sb->sb_startzero, 0, |
|
sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); |
splx(s); |
splx(s); |
if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) |
if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) |
(*pr->pr_domain->dom_dispose)(asb.sb_mb); |
(*pr->pr_domain->dom_dispose)(asb.sb_mb); |
sbrelease(&asb); |
sbrelease(&asb, so); |
} |
} |
|
|
int |
static int |
sosetopt(struct socket *so, int level, int optname, struct mbuf *m0) |
sosetopt1(struct socket *so, int level, int optname, struct mbuf *m) |
{ |
{ |
int error; |
int optval, val; |
struct mbuf *m; |
struct linger *l; |
|
struct sockbuf *sb; |
|
struct timeval *tv; |
|
|
|
switch (optname) { |
|
|
|
case SO_LINGER: |
|
if (m == NULL || m->m_len != sizeof(struct linger)) |
|
return EINVAL; |
|
l = mtod(m, struct linger *); |
|
if (l->l_linger < 0 || l->l_linger > USHRT_MAX || |
|
l->l_linger > (INT_MAX / hz)) |
|
return EDOM; |
|
so->so_linger = l->l_linger; |
|
if (l->l_onoff) |
|
so->so_options |= SO_LINGER; |
|
else |
|
so->so_options &= ~SO_LINGER; |
|
break; |
|
|
|
case SO_DEBUG: |
|
case SO_KEEPALIVE: |
|
case SO_DONTROUTE: |
|
case SO_USELOOPBACK: |
|
case SO_BROADCAST: |
|
case SO_REUSEADDR: |
|
case SO_REUSEPORT: |
|
case SO_OOBINLINE: |
|
case SO_TIMESTAMP: |
|
if (m == NULL || m->m_len < sizeof(int)) |
|
return EINVAL; |
|
if (*mtod(m, int *)) |
|
so->so_options |= optname; |
|
else |
|
so->so_options &= ~optname; |
|
break; |
|
|
|
case SO_SNDBUF: |
|
case SO_RCVBUF: |
|
case SO_SNDLOWAT: |
|
case SO_RCVLOWAT: |
|
if (m == NULL || m->m_len < sizeof(int)) |
|
return EINVAL; |
|
|
error = 0; |
/* |
m = m0; |
* Values < 1 make no sense for any of these |
if (level != SOL_SOCKET) { |
* options, so disallow them. |
if (so->so_proto && so->so_proto->pr_ctloutput) |
*/ |
return ((*so->so_proto->pr_ctloutput) |
optval = *mtod(m, int *); |
(PRCO_SETOPT, so, level, optname, &m0)); |
if (optval < 1) |
error = ENOPROTOOPT; |
return EINVAL; |
} else { |
|
switch (optname) { |
|
|
|
case SO_LINGER: |
|
if (m == NULL || m->m_len != sizeof(struct linger)) { |
|
error = EINVAL; |
|
goto bad; |
|
} |
|
so->so_linger = mtod(m, struct linger *)->l_linger; |
|
/* fall thru... */ |
|
|
|
case SO_DEBUG: |
switch (optname) { |
case SO_KEEPALIVE: |
|
case SO_DONTROUTE: |
|
case SO_USELOOPBACK: |
|
case SO_BROADCAST: |
|
case SO_REUSEADDR: |
|
case SO_REUSEPORT: |
|
case SO_OOBINLINE: |
|
case SO_TIMESTAMP: |
|
if (m == NULL || m->m_len < sizeof(int)) { |
|
error = EINVAL; |
|
goto bad; |
|
} |
|
if (*mtod(m, int *)) |
|
so->so_options |= optname; |
|
else |
|
so->so_options &= ~optname; |
|
break; |
|
|
|
case SO_SNDBUF: |
case SO_SNDBUF: |
case SO_RCVBUF: |
case SO_RCVBUF: |
|
sb = (optname == SO_SNDBUF) ? |
|
&so->so_snd : &so->so_rcv; |
|
if (sbreserve(sb, (u_long)optval, so) == 0) |
|
return ENOBUFS; |
|
sb->sb_flags &= ~SB_AUTOSIZE; |
|
break; |
|
|
|
/* |
|
* Make sure the low-water is never greater than |
|
* the high-water. |
|
*/ |
case SO_SNDLOWAT: |
case SO_SNDLOWAT: |
|
so->so_snd.sb_lowat = |
|
(optval > so->so_snd.sb_hiwat) ? |
|
so->so_snd.sb_hiwat : optval; |
|
break; |
case SO_RCVLOWAT: |
case SO_RCVLOWAT: |
{ |
so->so_rcv.sb_lowat = |
int optval; |
(optval > so->so_rcv.sb_hiwat) ? |
|
so->so_rcv.sb_hiwat : optval; |
if (m == NULL || m->m_len < sizeof(int)) { |
break; |
error = EINVAL; |
} |
goto bad; |
break; |
} |
|
|
|
/* |
|
* Values < 1 make no sense for any of these |
|
* options, so disallow them. |
|
*/ |
|
optval = *mtod(m, int *); |
|
if (optval < 1) { |
|
error = EINVAL; |
|
goto bad; |
|
} |
|
|
|
switch (optname) { |
|
|
|
case SO_SNDBUF: |
case SO_SNDTIMEO: |
case SO_RCVBUF: |
case SO_RCVTIMEO: |
if (sbreserve(optname == SO_SNDBUF ? |
if (m == NULL || m->m_len < sizeof(*tv)) |
&so->so_snd : &so->so_rcv, |
return EINVAL; |
(u_long) optval) == 0) { |
tv = mtod(m, struct timeval *); |
error = ENOBUFS; |
if (tv->tv_sec > (INT_MAX - tv->tv_usec / tick) / hz) |
goto bad; |
return EDOM; |
} |
val = tv->tv_sec * hz + tv->tv_usec / tick; |
break; |
if (val == 0 && tv->tv_usec != 0) |
|
val = 1; |
|
|
/* |
switch (optname) { |
* Make sure the low-water is never greater than |
|
* the high-water. |
|
*/ |
|
case SO_SNDLOWAT: |
|
so->so_snd.sb_lowat = |
|
(optval > so->so_snd.sb_hiwat) ? |
|
so->so_snd.sb_hiwat : optval; |
|
break; |
|
case SO_RCVLOWAT: |
|
so->so_rcv.sb_lowat = |
|
(optval > so->so_rcv.sb_hiwat) ? |
|
so->so_rcv.sb_hiwat : optval; |
|
break; |
|
} |
|
break; |
|
} |
|
|
|
case SO_SNDTIMEO: |
case SO_SNDTIMEO: |
|
so->so_snd.sb_timeo = val; |
|
break; |
case SO_RCVTIMEO: |
case SO_RCVTIMEO: |
{ |
so->so_rcv.sb_timeo = val; |
struct timeval *tv; |
break; |
short val; |
} |
|
break; |
|
|
if (m == NULL || m->m_len < sizeof(*tv)) { |
default: |
error = EINVAL; |
return ENOPROTOOPT; |
goto bad; |
} |
} |
return 0; |
tv = mtod(m, struct timeval *); |
} |
if (tv->tv_sec * hz + tv->tv_usec / tick > SHRT_MAX) { |
|
error = EDOM; |
|
goto bad; |
|
} |
|
val = tv->tv_sec * hz + tv->tv_usec / tick; |
|
|
|
switch (optname) { |
int |
|
sosetopt(struct socket *so, int level, int optname, struct mbuf *m) |
|
{ |
|
int error, prerr; |
|
|
case SO_SNDTIMEO: |
if (level == SOL_SOCKET) |
so->so_snd.sb_timeo = val; |
error = sosetopt1(so, level, optname, m); |
break; |
else |
case SO_RCVTIMEO: |
error = ENOPROTOOPT; |
so->so_rcv.sb_timeo = val; |
|
break; |
|
} |
|
break; |
|
} |
|
|
|
default: |
if ((error == 0 || error == ENOPROTOOPT) && |
error = ENOPROTOOPT; |
so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) { |
break; |
/* give the protocol stack a shot */ |
} |
prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, level, |
if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { |
optname, &m); |
(void) ((*so->so_proto->pr_ctloutput) |
if (prerr == 0) |
(PRCO_SETOPT, so, level, optname, &m0)); |
error = 0; |
m = NULL; /* freed by protocol */ |
else if (prerr != ENOPROTOOPT) |
} |
error = prerr; |
} |
} else if (m != NULL) |
bad: |
(void)m_free(m); |
if (m) |
return error; |
(void) m_free(m); |
|
return (error); |
|
} |
} |
|
|
int |
int |
Line 1031 sogetopt(struct socket *so, int level, i |
|
Line 1616 sogetopt(struct socket *so, int level, i |
|
case SO_LINGER: |
case SO_LINGER: |
m->m_len = sizeof(struct linger); |
m->m_len = sizeof(struct linger); |
mtod(m, struct linger *)->l_onoff = |
mtod(m, struct linger *)->l_onoff = |
so->so_options & SO_LINGER; |
(so->so_options & SO_LINGER) ? 1 : 0; |
mtod(m, struct linger *)->l_linger = so->so_linger; |
mtod(m, struct linger *)->l_linger = so->so_linger; |
break; |
break; |
|
|
Line 1044 sogetopt(struct socket *so, int level, i |
|
Line 1629 sogetopt(struct socket *so, int level, i |
|
case SO_BROADCAST: |
case SO_BROADCAST: |
case SO_OOBINLINE: |
case SO_OOBINLINE: |
case SO_TIMESTAMP: |
case SO_TIMESTAMP: |
*mtod(m, int *) = so->so_options & optname; |
*mtod(m, int *) = (so->so_options & optname) ? 1 : 0; |
break; |
break; |
|
|
case SO_TYPE: |
case SO_TYPE: |
Line 1085 sogetopt(struct socket *so, int level, i |
|
Line 1670 sogetopt(struct socket *so, int level, i |
|
break; |
break; |
} |
} |
|
|
|
case SO_OVERFLOWED: |
|
*mtod(m, int *) = so->so_rcv.sb_overflowed; |
|
break; |
|
|
default: |
default: |
(void)m_free(m); |
(void)m_free(m); |
return (ENOPROTOOPT); |
return (ENOPROTOOPT); |
Line 1097 sogetopt(struct socket *so, int level, i |
|
Line 1686 sogetopt(struct socket *so, int level, i |
|
void |
void |
sohasoutofband(struct socket *so) |
sohasoutofband(struct socket *so) |
{ |
{ |
struct proc *p; |
fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so); |
|
|
if (so->so_pgid < 0) |
|
gsignal(-so->so_pgid, SIGURG); |
|
else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0) |
|
psignal(p, SIGURG); |
|
selwakeup(&so->so_rcv.sb_sel); |
selwakeup(&so->so_rcv.sb_sel); |
} |
} |
|
|
|
static void |
|
filt_sordetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) |
|
so->so_rcv.sb_flags &= ~SB_KNOTE; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_soread(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
kn->kn_data = so->so_rcv.sb_cc; |
|
if (so->so_state & SS_CANTRCVMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
return (1); |
|
} |
|
if (so->so_error) /* temporary udp error */ |
|
return (1); |
|
if (kn->kn_sfflags & NOTE_LOWAT) |
|
return (kn->kn_data >= kn->kn_sdata); |
|
return (kn->kn_data >= so->so_rcv.sb_lowat); |
|
} |
|
|
|
static void |
|
filt_sowdetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) |
|
so->so_snd.sb_flags &= ~SB_KNOTE; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_sowrite(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
kn->kn_data = sbspace(&so->so_snd); |
|
if (so->so_state & SS_CANTSENDMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
return (1); |
|
} |
|
if (so->so_error) /* temporary udp error */ |
|
return (1); |
|
if (((so->so_state & SS_ISCONNECTED) == 0) && |
|
(so->so_proto->pr_flags & PR_CONNREQUIRED)) |
|
return (0); |
|
if (kn->kn_sfflags & NOTE_LOWAT) |
|
return (kn->kn_data >= kn->kn_sdata); |
|
return (kn->kn_data >= so->so_snd.sb_lowat); |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_solisten(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
|
|
/* |
|
* Set kn_data to number of incoming connections, not |
|
* counting partial (incomplete) connections. |
|
*/ |
|
kn->kn_data = so->so_qlen; |
|
return (kn->kn_data > 0); |
|
} |
|
|
|
static const struct filterops solisten_filtops = |
|
{ 1, NULL, filt_sordetach, filt_solisten }; |
|
static const struct filterops soread_filtops = |
|
{ 1, NULL, filt_sordetach, filt_soread }; |
|
static const struct filterops sowrite_filtops = |
|
{ 1, NULL, filt_sowdetach, filt_sowrite }; |
|
|
|
int |
|
soo_kqfilter(struct file *fp, struct knote *kn) |
|
{ |
|
struct socket *so; |
|
struct sockbuf *sb; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
switch (kn->kn_filter) { |
|
case EVFILT_READ: |
|
if (so->so_options & SO_ACCEPTCONN) |
|
kn->kn_fop = &solisten_filtops; |
|
else |
|
kn->kn_fop = &soread_filtops; |
|
sb = &so->so_rcv; |
|
break; |
|
case EVFILT_WRITE: |
|
kn->kn_fop = &sowrite_filtops; |
|
sb = &so->so_snd; |
|
break; |
|
default: |
|
return (1); |
|
} |
|
SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext); |
|
sb->sb_flags |= SB_KNOTE; |
|
return (0); |
|
} |
|
|
|
#include <sys/sysctl.h> |
|
|
|
static int sysctl_kern_somaxkva(SYSCTLFN_PROTO); |
|
|
|
/* |
|
* sysctl helper routine for kern.somaxkva. ensures that the given |
|
* value is not too small. |
|
* (XXX should we maybe make sure it's not too large as well?) |
|
*/ |
|
static int |
|
sysctl_kern_somaxkva(SYSCTLFN_ARGS) |
|
{ |
|
int error, new_somaxkva; |
|
struct sysctlnode node; |
|
|
|
new_somaxkva = somaxkva; |
|
node = *rnode; |
|
node.sysctl_data = &new_somaxkva; |
|
error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
|
if (error || newp == NULL) |
|
return (error); |
|
|
|
if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */ |
|
return (EINVAL); |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
somaxkva = new_somaxkva; |
|
cv_broadcast(&socurkva_cv); |
|
mutex_exit(&so_pendfree_lock); |
|
|
|
return (error); |
|
} |
|
|
|
SYSCTL_SETUP(sysctl_kern_somaxkva_setup, "sysctl kern.somaxkva setup") |
|
{ |
|
|
|
sysctl_createv(clog, 0, NULL, NULL, |
|
CTLFLAG_PERMANENT, |
|
CTLTYPE_NODE, "kern", NULL, |
|
NULL, 0, NULL, 0, |
|
CTL_KERN, CTL_EOL); |
|
|
|
sysctl_createv(clog, 0, NULL, NULL, |
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE, |
|
CTLTYPE_INT, "somaxkva", |
|
SYSCTL_DESCR("Maximum amount of kernel memory to be " |
|
"used for socket buffers"), |
|
sysctl_kern_somaxkva, 0, NULL, 0, |
|
CTL_KERN, KERN_SOMAXKVA, CTL_EOL); |
|
} |