|
|
/*	$NetBSD$	*/

/*-
 * Copyright (c) 2002, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004 Robert Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.6 (Berkeley) 5/2/95
 */
|
|
|
#include <sys/cdefs.h> |
|
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#include "opt_compat_netbsd.h" |
|
#include "opt_sock_counters.h" |
|
#include "opt_sosend_loan.h" |
|
#include "opt_mbuftrace.h" |
|
#include "opt_somaxkva.h" |
|
#include "opt_multiprocessor.h" /* XXX */ |
|
|
|
#include <sys/param.h> |
|
#include <sys/systm.h> |
|
#include <sys/proc.h> |
|
#include <sys/file.h> |
|
#include <sys/filedesc.h> |
|
#include <sys/kmem.h> |
|
#include <sys/mbuf.h> |
|
#include <sys/domain.h> |
|
#include <sys/kernel.h> |
|
#include <sys/protosw.h> |
|
#include <sys/socket.h> |
|
#include <sys/socketvar.h> |
|
#include <sys/signalvar.h> |
|
#include <sys/resourcevar.h> |
|
#include <sys/uidinfo.h> |
|
#include <sys/event.h> |
|
#include <sys/poll.h> |
|
#include <sys/kauth.h> |
|
#include <sys/mutex.h> |
|
#include <sys/condvar.h> |
|
|
|
#ifdef COMPAT_50 |
|
#include <compat/sys/time.h> |
|
#define SO_OSNDTIMEO 0x1005 |
|
#define SO_ORCVTIMEO 0x1006 |
|
#endif |
|
|
|
#include <uvm/uvm.h> |
|
|
|
MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options"); |
|
MALLOC_DEFINE(M_SONAME, "soname", "socket name"); |
|
|
|
extern const struct fileops socketops; |
|
|
|
extern int somaxconn; /* patchable (XXX sysctl) */ |
|
int somaxconn = SOMAXCONN; |
|
kmutex_t *softnet_lock; |
|
|
|
#ifdef SOSEND_COUNTERS |
|
#include <sys/device.h> |
|
|
|
static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "loan big"); |
|
static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "copy big"); |
|
static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "copy small"); |
|
static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "kva limit"); |
|
|
|
#define SOSEND_COUNTER_INCR(ev) (ev)->ev_count++ |
|
|
|
EVCNT_ATTACH_STATIC(sosend_loan_big); |
|
EVCNT_ATTACH_STATIC(sosend_copy_big); |
|
EVCNT_ATTACH_STATIC(sosend_copy_small); |
|
EVCNT_ATTACH_STATIC(sosend_kvalimit); |
|
#else |
|
|
|
#define SOSEND_COUNTER_INCR(ev) /* nothing */ |
|
|
|
#endif /* SOSEND_COUNTERS */ |
|
|
|
static struct callback_entry sokva_reclaimerentry; |
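
/*
 * sock_loan_thresh is the smallest request (in bytes) for which sosend()
 * will try to loan the user's pages into mbuf external storage instead
 * of copying them; a negative value disables page loaning altogether.
 */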
|
|
|
#if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR) |
|
int sock_loan_thresh = -1; |
|
#else |
|
int sock_loan_thresh = 4096; |
|
#endif |
|
|
|
static kmutex_t so_pendfree_lock; |
|
static struct mbuf *so_pendfree; |
|
|
|
#ifndef SOMAXKVA |
|
#define SOMAXKVA (16 * 1024 * 1024) |
|
#endif |
|
int somaxkva = SOMAXKVA; |
|
static int socurkva; |
|
static kcondvar_t socurkva_cv; |
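
/*
 * somaxkva bounds the total kernel virtual address space that may be
 * tied up by loaned pages at any one time.  socurkva tracks the current
 * reservation and socurkva_cv is broadcast whenever KVA is returned, so
 * that writers sleeping in sokvareserve() can retry.
 */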
|
|
|
#define SOCK_LOAN_CHUNK 65536 |
|
|
|
static size_t sodopendfree(void); |
|
static size_t sodopendfreel(void); |
|
|
|
static void sysctl_kern_somaxkva_setup(void); |
|
static struct sysctllog *socket_sysctllog; |
|
|
|
static vsize_t |
|
sokvareserve(struct socket *so, vsize_t len) |
|
{ |
|
int error; |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
while (socurkva + len > somaxkva) { |
|
size_t freed; |
|
|
|
/* |
|
* try to do pendfree. |
|
*/ |
|
|
|
freed = sodopendfreel(); |
|
|
|
/* |
|
* if some kva was freed, try again. |
|
*/ |
|
|
|
if (freed) |
|
continue; |
|
|
|
SOSEND_COUNTER_INCR(&sosend_kvalimit); |
|
error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock); |
|
if (error) { |
|
len = 0; |
|
break; |
|
} |
|
} |
|
socurkva += len; |
|
mutex_exit(&so_pendfree_lock); |
|
return len; |
|
} |
|
|
|
static void |
|
sokvaunreserve(vsize_t len) |
|
{ |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
socurkva -= len; |
|
cv_broadcast(&socurkva_cv); |
|
mutex_exit(&so_pendfree_lock); |
|
} |
|
|
|
/* |
|
* sokvaalloc: allocate kva for loan. |
|
*/ |
|
|
|
vaddr_t |
|
sokvaalloc(vsize_t len, struct socket *so) |
|
{ |
|
vaddr_t lva; |
|
|
|
/* |
|
* reserve kva. |
|
*/ |
|
|
|
if (sokvareserve(so, len) == 0) |
|
return 0; |
|
|
|
/* |
|
* allocate kva. |
|
*/ |
|
|
|
lva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA); |
|
if (lva == 0) { |
|
sokvaunreserve(len); |
|
return (0); |
|
} |
|
|
|
return lva; |
|
} |
|
|
|
/* |
|
* sokvafree: free kva for loan. |
|
*/ |
|
|
|
void |
|
sokvafree(vaddr_t sva, vsize_t len) |
|
{ |
|
|
|
/* |
|
* free kva. |
|
*/ |
|
|
|
uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY); |
|
|
|
/* |
|
* unreserve kva. |
|
*/ |
|
|
|
sokvaunreserve(len); |
|
} |
|
|
|
static void |
|
sodoloanfree(struct vm_page **pgs, void *buf, size_t size) |
|
{ |
|
vaddr_t sva, eva; |
|
vsize_t len; |
|
int npgs; |
|
|
|
KASSERT(pgs != NULL); |
|
|
|
eva = round_page((vaddr_t) buf + size); |
|
sva = trunc_page((vaddr_t) buf); |
|
len = eva - sva; |
|
npgs = len >> PAGE_SHIFT; |
|
|
|
pmap_kremove(sva, len); |
|
pmap_update(pmap_kernel()); |
|
uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE); |
|
sokvafree(sva, len); |
|
} |
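
/*
 * sodopendfree: drain the "pendfree" list built up by soloanfree() and
 * return how many bytes of loaned external storage were released.  The
 * actual free must run in thread context because the loaned KVA has to
 * be handed back to kernel_map.
 */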
|
|
|
static size_t |
|
sodopendfree(void) |
|
{ |
|
size_t rv; |
|
|
|
if (__predict_true(so_pendfree == NULL)) |
|
return 0; |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
rv = sodopendfreel(); |
|
mutex_exit(&so_pendfree_lock); |
|
|
|
return rv; |
|
} |
|
|
|
/* |
|
* sodopendfreel: free mbufs on "pendfree" list. |
|
* unlock and relock so_pendfree_lock when freeing mbufs. |
|
* |
|
* => called with so_pendfree_lock held. |
 */

static size_t
sodopendfreel(void)
{
	struct mbuf *m, *next;
	size_t rv = 0;

	KASSERT(mutex_owned(&so_pendfree_lock));

	while (so_pendfree != NULL) {
		m = so_pendfree;
		so_pendfree = NULL;
		mutex_exit(&so_pendfree_lock);
|
for (; m != NULL; m = next) { |
|
next = m->m_next; |
|
KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0); |
|
KASSERT(m->m_ext.ext_refcnt == 0); |
|
|
|
rv += m->m_ext.ext_size; |
|
sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf, |
|
m->m_ext.ext_size); |
|
pool_cache_put(mb_cache, m); |
|
} |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
} |
|
|
|
return (rv); |
|
} |
|
|
|
void |
|
soloanfree(struct mbuf *m, void *buf, size_t size, void *arg) |
|
{ |
|
|
|
KASSERT(m != NULL); |
|
|
|
/* |
|
* postpone freeing mbuf. |
|
* |
|
* we can't do it in interrupt context |
|
* because we need to put kva back to kernel_map. |
|
*/ |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
m->m_next = so_pendfree; |
|
so_pendfree = m; |
|
cv_broadcast(&socurkva_cv); |
|
mutex_exit(&so_pendfree_lock); |
|
} |
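
/*
 * sosend_loan: wire the user pages referenced by the current iovec, map
 * them read-only into kernel VA obtained from sokvaalloc(), and attach
 * them to the mbuf as external storage with soloanfree() as the free
 * routine.  Returns the number of bytes loaned, or 0 to make the caller
 * fall back to an ordinary copy.
 */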
|
|
|
static long |
|
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space) |
|
{ |
|
struct iovec *iov = uio->uio_iov; |
|
vaddr_t sva, eva; |
|
vsize_t len; |
|
vaddr_t lva; |
|
int npgs, error; |
|
vaddr_t va; |
|
int i; |
|
|
|
if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) |
|
return (0); |
|
|
|
if (iov->iov_len < (size_t) space) |
|
space = iov->iov_len; |
|
if (space > SOCK_LOAN_CHUNK) |
|
space = SOCK_LOAN_CHUNK; |
|
|
|
eva = round_page((vaddr_t) iov->iov_base + space); |
|
sva = trunc_page((vaddr_t) iov->iov_base); |
|
len = eva - sva; |
|
npgs = len >> PAGE_SHIFT; |
|
|
|
KASSERT(npgs <= M_EXT_MAXPAGES); |
|
|
|
lva = sokvaalloc(len, so); |
|
if (lva == 0) |
|
return 0; |
|
|
|
error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len, |
|
m->m_ext.ext_pgs, UVM_LOAN_TOPAGE); |
|
if (error) { |
|
sokvafree(lva, len); |
|
return (0); |
|
} |
|
|
|
for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE) |
|
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]), |
|
VM_PROT_READ); |
|
pmap_update(pmap_kernel()); |
|
|
|
lva += (vaddr_t) iov->iov_base & PAGE_MASK; |
|
|
|
MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so); |
|
m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP; |
|
|
|
uio->uio_resid -= space; |
|
/* uio_offset not updated, not set/used for write(2) */ |
|
uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space; |
|
uio->uio_iov->iov_len -= space; |
|
if (uio->uio_iov->iov_len == 0) { |
|
uio->uio_iov++; |
|
uio->uio_iovcnt--; |
|
} |
|
|
|
return (space); |
|
} |
|
|
|
static int |
|
sokva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) |
|
{ |
|
|
|
KASSERT(ce == &sokva_reclaimerentry); |
|
KASSERT(obj == NULL); |
|
|
|
sodopendfree(); |
|
if (!vm_map_starved_p(kernel_map)) { |
|
return CALLBACK_CHAIN_ABORT; |
|
} |
|
return CALLBACK_CHAIN_CONTINUE; |
|
} |
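
/*
 * sokva_reclaim_callback is registered on kernel_map's reclaim chain by
 * soinit().  When the map runs short of VA it drains the pendfree list;
 * the chain is aborted once the map is no longer starved, otherwise the
 * remaining reclaim callbacks get a chance to run.
 */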
|
|
|
struct mbuf * |
|
getsombuf(struct socket *so, int type) |
|
{ |
|
struct mbuf *m; |
|
|
|
m = m_get(M_WAIT, type); |
|
MCLAIM(m, so->so_mowner); |
|
return m; |
|
} |
|
|
|
void |
|
soinit(void) |
|
{ |
|
|
|
sysctl_kern_somaxkva_setup(); |
|
|
|
mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM); |
|
softnet_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); |
|
cv_init(&socurkva_cv, "sokva"); |
|
soinit2(); |
|
|
|
/* Set the initial adjusted socket buffer size. */ |
|
if (sb_max_set(sb_max)) |
|
panic("bad initial sb_max value: %lu", sb_max); |
|
|
|
callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback, |
|
&sokva_reclaimerentry, NULL, sokva_reclaim_callback); |
|
} |
|
|
/*
 * Socket operation routines.
 */
/*ARGSUSED*/
int
socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l,
	 struct socket *lockso)
{
	const struct protosw	*prp;
	struct socket	*so;
	uid_t		uid;
	int		error;
	kmutex_t	*lock;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET,
	    KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type),
	    KAUTH_ARG(proto));
	if (error != 0)
		return error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL) {
		/* no support for domain */
		if (pffinddomain(dom) == 0)
			return EAFNOSUPPORT;
		/* no support for socket type */
		if (proto == 0 && type != 0)
			return EPROTOTYPE;
		return EPROTONOSUPPORT;
	}
	if (prp->pr_usrreq == NULL)
		return EPROTONOSUPPORT;
	if (prp->pr_type != type)
		return EPROTOTYPE;

	so = soget(true);
	so->so_type = type;
	so->so_proto = prp;
	so->so_send = sosend;
	so->so_receive = soreceive;
#ifdef MBUFTRACE
	so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_mowner = &prp->pr_domain->dom_mowner;
#endif
	uid = kauth_cred_geteuid(l->l_cred);
	so->so_uidinfo = uid_find(uid);
	so->so_egid = kauth_cred_getegid(l->l_cred);
	so->so_cpid = l->l_proc->p_pid;
	if (lockso != NULL) {
		/* Caller wants us to share a lock. */
		lock = lockso->so_lock;
		so->so_lock = lock;
		mutex_obj_hold(lock);
		mutex_enter(lock);
	} else {
		/* Lock assigned and taken during PRU_ATTACH. */
	}
	error = (*prp->pr_usrreq)(so, PRU_ATTACH, NULL,
	    (struct mbuf *)(long)proto, NULL, l);
	KASSERT(solocked(so));
	if (error != 0) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return error;
	}
	sounlock(so);
	*aso = so;
	return 0;
}
|
|
|
/* On success, write file descriptor to fdout and return zero.  On
 * failure, return non-zero; *fdout will be undefined.
 */
int
fsocreate(int domain, struct socket **sop, int type, int protocol,
	  struct lwp *l, int *fdout)
{
	struct socket	*so;
	struct file	*fp;
	int		fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);
	fp->f_flag = FREAD|FWRITE;
	fp->f_type = DTYPE_SOCKET;
	fp->f_ops = &socketops;
	error = socreate(domain, &so, type, protocol, l, NULL);
	if (error != 0) {
		fd_abort(curproc, fp, fd);
	} else {
		if (sop != NULL)
			*sop = so;
		fp->f_data = so;
		fd_affix(curproc, fp, fd);
		*fdout = fd;
	}
	return error;
}
|
|
int
sobind(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	int	error;

	solock(so);
	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, l);
	sounlock(so);
	return error;
}

int
solisten(struct socket *so, int backlog, struct lwp *l)
{
	int	error;

	solock(so);
	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) != 0) {
		sounlock(so);
		return (EOPNOTSUPP);
	}
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL,
	    NULL, NULL, l);
	if (error != 0) {
		sounlock(so);
		return error;
	}
	if (TAILQ_EMPTY(&so->so_q))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, somaxconn);
	sounlock(so);
	return 0;
}
|
|
void
sofree(struct socket *so)
{
	u_int refs;

	KASSERT(solocked(so));

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so);
			return;
		}
	}
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0,
		    RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0,
		    RLIM_INFINITY);
	sbrelease(&so->so_snd, so);
	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
	sorflush(so);
	refs = so->so_aborting;	/* XXX */
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		(void)accept_filt_clear(so);
	sounlock(so);
	if (refs == 0)		/* XXX */
		soput(so);
}
|
|
/*
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket	*so2;
	int		error;
	int		error2;

	error = 0;
	solock(so);
	if (so->so_options & SO_ACCEPTCONN) {
		for (;;) {
			if ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 0);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			if ((so2 = TAILQ_FIRST(&so->so_q)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 1);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			break;
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) && so->so_nbio)
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sowait(so, so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
 drop:
	if (so->so_pcb) {
		error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
		    NULL, NULL, NULL, NULL);
		if (error == 0)
			error = error2;
	}
 discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	return (error);
}
|
|
/*
 * Must be called with the socket locked.  Will return with it unlocked.
 */
int
soabort(struct socket *so)
{
	u_int refs;
	int error;

	KASSERT(solocked(so));
	KASSERT(so->so_head == NULL);

	so->so_aborting++;		/* XXX */
	error = (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL,
	    NULL, NULL, NULL);
	refs = --so->so_aborting;	/* XXX */
	if (error || (refs == 0)) {
		sofree(so);
	} else {
		sounlock(so);
	}
	return error;
}
|
|
int
soaccept(struct socket *so, struct mbuf *nam)
{
	int	error;

	KASSERT(solocked(so));

	error = 0;
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
		    NULL, nam, NULL, NULL);
	else
		error = ECONNABORTED;

	return (error);
}
|
|
int
soconnect(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	int	error;

	KASSERT(solocked(so));

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, l);
	return (error);
}
|
|
int
soconnect2(struct socket *so1, struct socket *so2)
{
	int	error;

	KASSERT(solocked2(so1, so2));

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
	    NULL, (struct mbuf *)so2, NULL, NULL);
	return (error);
}
|
|
int
sodisconnect(struct socket *so)
{
	int	error;

	KASSERT(solocked(so));

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
	} else if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
	} else {
		error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
		    NULL, NULL, NULL, NULL);
	}
	sodopendfree();
	return (error);
}
|
|
|
#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) |
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffer, then hard error.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
	struct mbuf *control, int flags, struct lwp *l)
{
	struct mbuf	**mp, *m;
	struct proc	*p;
	long		space, len, resid, clen, mlen;
	int		error, s, dontroute, atomic;

	p = l->l_proc;
	sodopendfree();
	clen = 0;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
	atomic = sosendallatonce(so) || top;
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	l->l_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0)) {
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == 0) {
				error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_nbio) {
				error = EWOULDBLOCK;
				goto release;
			}
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				sounlock(so);
				splx(s);
				if (top == NULL) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				MCLAIM(m, so->so_snd.sb_mowner);
				if (sock_loan_thresh >= 0 &&
				    uio->uio_iov->iov_len >= sock_loan_thresh &&
				    space >= sock_loan_thresh &&
				    (len = sosend_loan(so, uio, m,
				    space)) != 0) {
					SOSEND_COUNTER_INCR(&sosend_loan_big);
					space -= len;
					goto have_data;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					SOSEND_COUNTER_INCR(&sosend_copy_big);
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
					space -= len;
				} else {
 nopages:
					SOSEND_COUNTER_INCR(&sosend_copy_small);
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, void *), (int)len, uio);
 have_data:
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				s = splsoftnet();
				solock(so);
				if (error != 0)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				goto release;
			}
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curlwp);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error != 0)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	sounlock(so);
	splx(s);
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return error;
}
|
|
/*
|
* Following replacement or removal of the first mbuf on the first |
|
* mbuf chain of a socket buffer, push necessary state changes back |
|
* into the socket buffer so that other consumers see the values |
|
* consistently. 'nextrecord' is the callers locally stored value of |
|
* the original value of sb->sb_mb->m_nextpkt which must be restored |
|
* when the lead mbuf changes. NOTE: 'nextrecord' may be NULL. |
|
*/ |
|
static void |
|
sbsync(struct sockbuf *sb, struct mbuf *nextrecord) |
|
{ |
|
|
|
KASSERT(solocked(sb->sb_so)); |
|
|
|
/* |
|
* First, update for the new value of nextrecord. If necessary, |
|
* make it the first record. |
|
*/ |
|
if (sb->sb_mb != NULL) |
|
sb->sb_mb->m_nextpkt = nextrecord; |
|
else |
|
sb->sb_mb = nextrecord; |
|
|
|
/* |
|
* Now update any dependent socket buffer fields to reflect |
|
* the new state. This is an inline of SB_EMPTY_FIXUP, with |
|
* the addition of a second clause that takes care of the |
|
* case where sb_mb has been updated, but remains the last |
|
* record. |
|
*/ |
|
if (sb->sb_mb == NULL) { |
|
sb->sb_mbtail = NULL; |
|
sb->sb_lastrecord = NULL; |
|
} else if (sb->sb_mb->m_nextpkt == NULL) |
|
sb->sb_lastrecord = sb->sb_mb; |
|
} |
|
|
|
/* |
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
	struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct lwp *l = curlwp;
	struct mbuf *m, **mp, *mt;
	int atomic, flags, len, error, s, offset, moff, type, orig_resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;
	int mbuf_removed = 0;
	const struct domain *dom;

	pr = so->so_proto;
	atomic = pr->pr_flags & PR_ATOMIC;
	dom = pr->pr_domain;
	mp = mp0;
	type = 0;
	orig_resid = uio->uio_resid;

	if (paddr != NULL)
		*paddr = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	if ((flags & MSG_DONTWAIT) == 0)
		sodopendfree();
|
|
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, l);
		sounlock(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, void *),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid > 0 && error == 0 && m);
 bad:
		if (m != NULL)
			m_freem(m);
		return error;
	}
	if (mp != NULL)
		*mp = NULL;
|
|
|
/* |
|
* solock() provides atomicity of access. splsoftnet() prevents |
|
* protocol processing soft interrupts from interrupting us and |
|
* blocking (expensive). |
|
*/ |
|
s = splsoftnet(); |
|
solock(so); |
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l);

 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so);
		splx(s);
		return error;
	}

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL ||
	    ((flags & MSG_DONTWAIT) == 0 &&
	     so->so_rcv.sb_cc < uio->uio_resid &&
	     (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	      ((flags & MSG_WAITALL) &&
	       uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	     m->m_nextpkt == NULL && !atomic)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m != NULL)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if (uio->uio_resid == 0)
			goto release;
		if (so->so_nbio || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error != 0) {
			sounlock(so);
			splx(s);
			return error;
		}
		goto restart;
	}
 dontblock:
	/*
|
* On entry here, m points to the first record of the socket buffer. |
|
* From this point onward, we maintain 'nextrecord' as a cache of the |
|
* pointer to the next record in the socket buffer. We must keep the |
|
* various socket buffer pointers and local stack versions of the |
|
* pointers in sync, pushing out modifications before dropping the |
|
* socket lock, and re-reading them when picking it up. |
|
* |
|
* Otherwise, we will race with the network stack appending new data |
|
* or records onto the socket buffer by using inconsistent/stale |
|
* versions of the field, possibly resulting in socket buffer |
|
* corruption. |
|
* |
|
* By holding the high-level sblock(), we prevent simultaneous |
|
* readers from pulling off the front of the socket buffer. |
|
*/ |
|
if (l != NULL) |
|
l->l_ru.ru_msgrcv++; |
|
KASSERT(m == so->so_rcv.sb_mb); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); |
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			mbuf_removed = 1;
			if (paddr != NULL) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				MFREE(m, so->so_rcv.sb_mb);
				m = so->so_rcv.sb_mb;
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sbsync(&so->so_rcv, nextrecord);
		for (; cm != NULL; cm = cmn) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			type = mtod(cm, struct cmsghdr *)->cmsg_type;
			if (controlp != NULL) {
				if (dom->dom_externalize != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					splx(s);
					error = (*dom->dom_externalize)(cm, l);
					s = splsoftnet();
					solock(so);
				}
				*controlp = cm;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (dom->dom_dispose != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					(*dom->dom_dispose)(cm);
					solock(so);
				}
				m_freem(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}
	/* If m is non-NULL, we have some data to read. */
	if (__predict_true(m != NULL)) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			sounlock(so);
			splx(s);
			error = uiomove(mtod(m, char *) + moff, (int)len, uio);
			s = splsoftnet();
			solock(so);
			if (error != 0) {
				/*
				 * If any part of the record has been removed
				 * (such as the MT_SONAME mbuf, which will
				 * happen when PR_ADDR, and thus also
				 * PR_ATOMIC, is set), then drop the entire
				 * record to maintain the atomicity of the
				 * receive operation.
				 *
				 * This avoids a later panic("receive 1a")
				 * when compiled with DIAGNOSTIC.
				 */
				if (m && mbuf_removed && atomic)
					(void) sbdroprecord(&so->so_rcv);

				goto release;
			}
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else if (flags & MSG_PEEK)
			moff += len;
		else {
			if (mp != NULL) {
				mt = m_copym(m, 0, len, M_NOWAIT);
				if (__predict_false(mt == NULL)) {
					sounlock(so);
					mt = m_copym(m, 0, len, M_WAIT);
					solock(so);
				}
				*mp = mt;
			}
			m->m_data += len;
			m->m_len -= len;
			so->so_rcv.sb_cc -= len;
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an
		 * error termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreq)(so, PRU_RCVD,
				    NULL, (struct mbuf *)(long)flags, NULL, l);
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error != 0) {
				sbunlock(&so->so_rcv);
				sounlock(so);
				splx(s);
				return 0;
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}
|
|
	if (m && atomic) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, l);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	sounlock(so);
	splx(s);
	return error;
}
|
|
int
soshutdown(struct socket *so, int how)
{
	const struct protosw	*pr;
	int	error;

	KASSERT(solocked(so));

	pr = so->so_proto;
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how == SHUT_RD || how == SHUT_RDWR) {
		sorflush(so);
		error = 0;
	}
	if (how == SHUT_WR || how == SHUT_RDWR)
		error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL,
		    NULL, NULL, NULL);

	return error;
}
|
|
void
sorflush(struct socket *so)
{
	struct sockbuf	*sb, asb;
	const struct protosw	*pr;

	KASSERT(solocked(so));

	sb = &so->so_rcv;
	pr = so->so_proto;
	socantrcvmore(so);
	sb->sb_flags |= SB_NOINTR;
	(void)sblock(sb, M_WAITOK);
	sbunlock(sb);
	asb = *sb;
	/*
	 * Clear most of the sockbuf structure, but leave some of the
	 * fields valid.
	 */
	memset(&sb->sb_startzero, 0,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) {
		sounlock(so);
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
		solock(so);
	}
	sbrelease(&asb, so);
}
|
|
sosetopt(so, level, optname, m0) |
/* |
register struct socket *so; |
* internal set SOL_SOCKET options |
int level, optname; |
*/ |
struct mbuf *m0; |
static int |
|
sosetopt1(struct socket *so, const struct sockopt *sopt) |
{ |
{ |
int error = 0; |
int error = 0, optval, opt; |
register struct mbuf *m = m0; |
struct linger l; |
|
struct timeval tv; |
if (level != SOL_SOCKET) { |
|
if (so->so_proto && so->so_proto->pr_ctloutput) |
switch ((opt = sopt->sopt_name)) { |
return ((*so->so_proto->pr_ctloutput) |
|
(PRCO_SETOPT, so, level, optname, &m0)); |
case SO_ACCEPTFILTER: |
error = ENOPROTOOPT; |
error = accept_filt_setopt(so, sopt); |
} else { |
KASSERT(solocked(so)); |
switch (optname) { |
break; |
|
|
|
case SO_LINGER: |
|
error = sockopt_get(sopt, &l, sizeof(l)); |
|
solock(so); |
|
if (error) |
|
break; |
|
if (l.l_linger < 0 || l.l_linger > USHRT_MAX || |
|
l.l_linger > (INT_MAX / hz)) { |
|
error = EDOM; |
|
break; |
|
} |
|
so->so_linger = l.l_linger; |
|
if (l.l_onoff) |
|
so->so_options |= SO_LINGER; |
|
else |
|
so->so_options &= ~SO_LINGER; |
|
break; |
|
|
|
case SO_DEBUG: |
|
case SO_KEEPALIVE: |
|
case SO_DONTROUTE: |
|
case SO_USELOOPBACK: |
|
case SO_BROADCAST: |
|
case SO_REUSEADDR: |
|
case SO_REUSEPORT: |
|
case SO_OOBINLINE: |
|
case SO_TIMESTAMP: |
|
error = sockopt_getint(sopt, &optval); |
|
solock(so); |
|
if (error) |
|
break; |
|
if (optval) |
|
so->so_options |= opt; |
|
else |
|
so->so_options &= ~opt; |
|
break; |
|
|
|
case SO_SNDBUF: |
|
case SO_RCVBUF: |
|
case SO_SNDLOWAT: |
|
case SO_RCVLOWAT: |
|
error = sockopt_getint(sopt, &optval); |
|
solock(so); |
|
if (error) |
|
break; |
|
|
case SO_LINGER: |
/* |
if (m == NULL || m->m_len != sizeof (struct linger)) { |
* Values < 1 make no sense for any of these |
error = EINVAL; |
* options, so disallow them. |
goto bad; |
*/ |
} |
if (optval < 1) { |
so->so_linger = mtod(m, struct linger *)->l_linger; |
error = EINVAL; |
/* fall thru... */ |
|
|
|
case SO_DEBUG: |
|
case SO_KEEPALIVE: |
|
case SO_DONTROUTE: |
|
case SO_USELOOPBACK: |
|
case SO_BROADCAST: |
|
case SO_REUSEADDR: |
|
case SO_OOBINLINE: |
|
if (m == NULL || m->m_len < sizeof (int)) { |
|
error = EINVAL; |
|
goto bad; |
|
} |
|
if (*mtod(m, int *)) |
|
so->so_options |= optname; |
|
else |
|
so->so_options &= ~optname; |
|
break; |
break; |
|
} |
|
|
|
		switch (opt) {
		case SO_SNDBUF:
			if (sbreserve(&so->so_snd, (u_long)optval, so) == 0) {
				error = ENOBUFS;
				break;
			}
			so->so_snd.sb_flags &= ~SB_AUTOSIZE;
			break;

		case SO_RCVBUF:
			if (sbreserve(&so->so_rcv, (u_long)optval, so) == 0) {
				error = ENOBUFS;
				break;
			}
			so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
			break;

		/*
		 * Make sure the low-water is never greater than
		 * the high-water.
		 */
		case SO_SNDLOWAT:
			if (optval > so->so_snd.sb_hiwat)
				optval = so->so_snd.sb_hiwat;

			so->so_snd.sb_lowat = optval;
			break;

		case SO_RCVLOWAT:
			if (optval > so->so_rcv.sb_hiwat)
				optval = so->so_rcv.sb_hiwat;

			so->so_rcv.sb_lowat = optval;
			break;
		}
		break;

#ifdef COMPAT_50
case SO_OSNDTIMEO: |
|
case SO_ORCVTIMEO: { |
|
struct timeval50 otv; |
|
error = sockopt_get(sopt, &otv, sizeof(otv)); |
|
timeval50_to_timeval(&otv, &tv); |
|
opt = opt == SO_OSNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO; |
|
error = 1; |
|
/*FALLTHROUGH*/ |
|
} |
|
#endif /* COMPAT_50 */ |
|
|
|
case SO_SNDTIMEO: |
|
case SO_RCVTIMEO: |
|
if (error == 0) |
|
error = sockopt_get(sopt, &tv, sizeof(tv)); |
|
solock(so); |
|
if (error) |
|
break; |
|
|
|
if (tv.tv_sec > (INT_MAX - tv.tv_usec / tick) / hz) { |
|
error = EDOM; |
			break;
		}
|
|
|
optval = tv.tv_sec * hz + tv.tv_usec / tick; |
|
if (optval == 0 && tv.tv_usec != 0) |
|
optval = 1; |
|
|
		switch (opt) {
		case SO_SNDTIMEO:
			so->so_snd.sb_timeo = optval;
			break;
		case SO_RCVTIMEO:
			so->so_rcv.sb_timeo = optval;
			break;
		}
|
break; |
|
|
|
default: |
|
solock(so); |
|
error = ENOPROTOOPT; |
|
break; |
|
} |
|
KASSERT(solocked(so)); |
|
return error; |
|
} |
|
|
|
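/*
 * sosetopt() handles SOL_SOCKET level options locally via sosetopt1()
 * and then gives the protocol's pr_ctloutput() a chance at the option;
 * the socket is returned unlocked.
 */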
int |
|
sosetopt(struct socket *so, struct sockopt *sopt) |
|
{ |
|
int error, prerr; |
|
|
|
if (sopt->sopt_level == SOL_SOCKET) { |
|
error = sosetopt1(so, sopt); |
|
KASSERT(solocked(so)); |
|
} else { |
|
error = ENOPROTOOPT; |
|
solock(so); |
|
} |
|
|
|
if ((error == 0 || error == ENOPROTOOPT) && |
|
so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) { |
|
/* give the protocol stack a shot */ |
|
prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, sopt); |
|
if (prerr == 0) |
|
error = 0; |
|
else if (prerr != ENOPROTOOPT) |
|
error = prerr; |
|
} |
|
sounlock(so); |
|
return error; |
|
} |
|
|
|
/* |
|
* so_setsockopt() is a wrapper providing a sockopt structure for sosetopt() |
|
*/ |
|
int |
|
so_setsockopt(struct lwp *l, struct socket *so, int level, int name, |
|
const void *val, size_t valsize) |
|
{ |
|
struct sockopt sopt; |
|
int error; |
|
|
|
KASSERT(valsize == 0 || val != NULL); |
|
|
|
sockopt_init(&sopt, level, name, valsize); |
|
sockopt_set(&sopt, val, valsize); |
|
|
|
error = sosetopt(so, &sopt); |
|
|
|
sockopt_destroy(&sopt); |
|
|
|
return error; |
|
} |
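/*
 * Example (sketch only; the caller shown is hypothetical): an in-kernel
 * user that owns a socket could enable keepalives with
 *
 *	int one = 1;
 *	error = so_setsockopt(curlwp, so, SOL_SOCKET, SO_KEEPALIVE,
 *	    &one, sizeof(one));
 *
 * so_setsockopt() builds the temporary sockopt, runs sosetopt() and
 * destroys the sockopt again, so callers only deal with plain memory.
 */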
|
|
|
/* |
|
* internal get SOL_SOCKET options |
|
*/ |
|
static int |
|
sogetopt1(struct socket *so, struct sockopt *sopt) |
|
{ |
|
int error, optval, opt; |
|
struct linger l; |
|
struct timeval tv; |
|
|
|
switch ((opt = sopt->sopt_name)) { |
|
|
|
case SO_ACCEPTFILTER: |
|
error = accept_filt_getopt(so, sopt); |
|
break; |
|
|
|
case SO_LINGER: |
|
l.l_onoff = (so->so_options & SO_LINGER) ? 1 : 0; |
|
l.l_linger = so->so_linger; |
|
|
|
error = sockopt_set(sopt, &l, sizeof(l)); |
|
break; |
|
|
|
case SO_USELOOPBACK: |
|
case SO_DONTROUTE: |
|
case SO_DEBUG: |
|
case SO_KEEPALIVE: |
|
case SO_REUSEADDR: |
|
case SO_REUSEPORT: |
|
case SO_BROADCAST: |
|
case SO_OOBINLINE: |
|
case SO_TIMESTAMP: |
|
error = sockopt_setint(sopt, (so->so_options & opt) ? 1 : 0); |
|
break; |
|
|
|
case SO_TYPE: |
|
error = sockopt_setint(sopt, so->so_type); |
|
break; |
|
|
|
case SO_ERROR: |
|
error = sockopt_setint(sopt, so->so_error); |
|
so->so_error = 0; |
|
break; |
|
|
|
case SO_SNDBUF: |
|
error = sockopt_setint(sopt, so->so_snd.sb_hiwat); |
|
break; |
|
|
|
case SO_RCVBUF: |
|
error = sockopt_setint(sopt, so->so_rcv.sb_hiwat); |
|
break; |
|
|
|
case SO_SNDLOWAT: |
|
error = sockopt_setint(sopt, so->so_snd.sb_lowat); |
|
break; |
|
|
|
case SO_RCVLOWAT: |
|
error = sockopt_setint(sopt, so->so_rcv.sb_lowat); |
|
break; |
|
|
|
#ifdef COMPAT_50 |
|
case SO_OSNDTIMEO: |
|
case SO_ORCVTIMEO: { |
|
struct timeval50 otv; |
|
|
|
optval = (opt == SO_OSNDTIMEO ? |
|
so->so_snd.sb_timeo : so->so_rcv.sb_timeo); |
|
|
|
otv.tv_sec = optval / hz; |
|
otv.tv_usec = (optval % hz) * tick; |
|
|
|
error = sockopt_set(sopt, &otv, sizeof(otv)); |
|
break; |
|
} |
|
#endif /* COMPAT_50 */ |
|
|
|
case SO_SNDTIMEO: |
|
case SO_RCVTIMEO: |
|
optval = (opt == SO_SNDTIMEO ? |
|
so->so_snd.sb_timeo : so->so_rcv.sb_timeo); |
|
|
|
tv.tv_sec = optval / hz; |
|
tv.tv_usec = (optval % hz) * tick; |
|
|
|
error = sockopt_set(sopt, &tv, sizeof(tv)); |
|
break; |
|
|
|
case SO_OVERFLOWED: |
|
error = sockopt_setint(sopt, so->so_rcv.sb_overflowed); |
|
break; |
|
|
|
default: |
|
error = ENOPROTOOPT; |
|
break; |
	}

	return (error);
}
|
|
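/*
 * sogetopt() is the read-side counterpart of sosetopt(): SOL_SOCKET
 * options are answered by sogetopt1(), anything else is passed to the
 * protocol's pr_ctloutput() with PRCO_GETOPT.
 */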
int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int error;

	solock(so);
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			error = ((*so->so_proto->pr_ctloutput)
			    (PRCO_GETOPT, so, sopt));
		} else
			error = (ENOPROTOOPT);
	} else {
		error = sogetopt1(so, sopt);
	}
	sounlock(so);

	return (error);
}
|
|
/*
 * alloc sockopt data buffer
 * - will be released at destroy
 */
static int
sockopt_alloc(struct sockopt *sopt, size_t len, km_flag_t kmflag)
{

	KASSERT(sopt->sopt_size == 0);

	if (len > sizeof(sopt->sopt_buf)) {
		sopt->sopt_data = kmem_zalloc(len, kmflag);
		if (sopt->sopt_data == NULL)
			return ENOMEM;
	} else
		sopt->sopt_data = sopt->sopt_buf;

	sopt->sopt_size = len;
	return 0;
}

/*
 * initialise sockopt storage
 * - MAY sleep during allocation
 */
void
sockopt_init(struct sockopt *sopt, int level, int name, size_t size)
{

	memset(sopt, 0, sizeof(*sopt));

	sopt->sopt_level = level;
	sopt->sopt_name = name;
	(void)sockopt_alloc(sopt, size, KM_SLEEP);
}

/*
 * destroy sockopt storage
 * - will release any held memory references
 */
void
sockopt_destroy(struct sockopt *sopt)
{

	if (sopt->sopt_data != sopt->sopt_buf)
		kmem_free(sopt->sopt_data, sopt->sopt_size);

	memset(sopt, 0, sizeof(*sopt));
}

/*
 * set sockopt value
 * - value is copied into sockopt
 * - memory is allocated when necessary, will not sleep
 */
int
sockopt_set(struct sockopt *sopt, const void *buf, size_t len)
{
	int error;

	if (sopt->sopt_size == 0) {
		error = sockopt_alloc(sopt, len, KM_NOSLEEP);
		if (error)
			return error;
	}
|
|
|
KASSERT(sopt->sopt_size == len); |
|
memcpy(sopt->sopt_data, buf, len); |
|
return 0; |
|
} |
|
|
|
/* |
|
* common case of set sockopt integer value |
|
*/ |
|
int |
|
sockopt_setint(struct sockopt *sopt, int val) |
|
{ |
|
|
|
return sockopt_set(sopt, &val, sizeof(int)); |
|
} |
|
|
|
/* |
|
* get sockopt value |
|
* - correct size must be given |
|
*/ |
|
int |
|
sockopt_get(const struct sockopt *sopt, void *buf, size_t len) |
|
{ |
|
|
|
if (sopt->sopt_size != len) |
|
return EINVAL; |
|
|
|
memcpy(buf, sopt->sopt_data, len); |
|
return 0; |
|
} |
|
|
|
/* |
|
* common case of get sockopt integer value |
|
*/ |
|
int |
|
sockopt_getint(const struct sockopt *sopt, int *valp) |
|
{ |
|
|
|
return sockopt_get(sopt, valp, sizeof(int)); |
|
} |
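/*
 * Example (sketch; the handler shown is hypothetical): a protocol's
 * pr_ctloutput() can use the integer helpers for simple options, e.g.
 *
 *	case PRCO_SETOPT:
 *		error = sockopt_getint(sopt, &onoff);
 *		...
 *	case PRCO_GETOPT:
 *		error = sockopt_setint(sopt, onoff);
 *
 * sockopt_getint() requires an exact sizeof(int) payload and returns
 * EINVAL otherwise (see sockopt_get() above).
 */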
|
|
|
/* |
|
* set sockopt value from mbuf |
|
* - ONLY for legacy code |
|
* - mbuf is released by sockopt |
|
* - will not sleep |
|
*/ |
|
int |
|
sockopt_setmbuf(struct sockopt *sopt, struct mbuf *m) |
|
{ |
|
size_t len; |
|
int error; |
|
|
|
len = m_length(m); |
|
|
|
if (sopt->sopt_size == 0) { |
|
error = sockopt_alloc(sopt, len, KM_NOSLEEP); |
|
if (error) |
|
return error; |
|
} |
|
|
|
KASSERT(sopt->sopt_size == len); |
|
m_copydata(m, 0, len, sopt->sopt_data); |
|
m_freem(m); |
|
|
|
return 0; |
|
} |
|
|
|
/* |
|
* get sockopt value into mbuf |
|
* - ONLY for legacy code |
|
* - mbuf to be released by the caller |
|
* - will not sleep |
|
*/ |
|
struct mbuf * |
|
sockopt_getmbuf(const struct sockopt *sopt) |
|
{ |
|
struct mbuf *m; |
|
|
|
if (sopt->sopt_size > MCLBYTES) |
|
return NULL; |
|
|
|
m = m_get(M_DONTWAIT, MT_SOOPTS); |
|
if (m == NULL) |
|
return NULL; |
|
|
|
if (sopt->sopt_size > MLEN) { |
|
MCLGET(m, M_DONTWAIT); |
|
if ((m->m_flags & M_EXT) == 0) { |
|
m_free(m); |
|
return NULL; |
		}
	}
|
|
|
memcpy(mtod(m, void *), sopt->sopt_data, sopt->sopt_size); |
|
m->m_len = sopt->sopt_size; |
|
|
|
return m; |
|
} |
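/*
 * Example (sketch): legacy protocol code that still thinks in mbufs can
 * bridge to the sockopt interface with
 *
 *	m = sockopt_getmbuf(sopt);
 *	if (m == NULL)
 *		return ENOBUFS;
 *
 * and hand an option mbuf in with sockopt_setmbuf(sopt, m), after which
 * the mbuf belongs to the sockopt (it is freed by sockopt_setmbuf()).
 */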
|
|
|
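/*
 * Note the arrival of out-of-band data: post SIGURG to the socket's
 * owning process or process group and wake anyone selecting or polling
 * for priority data on the receive buffer.
 */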
void |
|
sohasoutofband(struct socket *so) |
|
{ |
|
|
|
fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so); |
|
selnotify(&so->so_rcv.sb_sel, POLLPRI | POLLRDBAND, 0); |
|
} |
|
|
|
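/*
 * kqueue(2) filters for sockets: filt_sordetach()/filt_sowdetach()
 * unhook a knote from the receive/send sockbuf, while filt_soread(),
 * filt_sowrite() and filt_solisten() evaluate readability, writability
 * and pending connections respectively.
 */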
static void |
|
filt_sordetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = ((file_t *)kn->kn_obj)->f_data; |
|
solock(so); |
|
SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) |
|
so->so_rcv.sb_flags &= ~SB_KNOTE; |
|
sounlock(so); |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_soread(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
int rv; |
|
|
|
so = ((file_t *)kn->kn_obj)->f_data; |
|
if (hint != NOTE_SUBMIT) |
|
solock(so); |
|
kn->kn_data = so->so_rcv.sb_cc; |
|
if (so->so_state & SS_CANTRCVMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
rv = 1; |
|
} else if (so->so_error) /* temporary udp error */ |
|
rv = 1; |
|
else if (kn->kn_sfflags & NOTE_LOWAT) |
|
rv = (kn->kn_data >= kn->kn_sdata); |
|
else |
|
rv = (kn->kn_data >= so->so_rcv.sb_lowat); |
|
if (hint != NOTE_SUBMIT) |
|
sounlock(so); |
|
return rv; |
|
} |
|
|
|
static void |
|
filt_sowdetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = ((file_t *)kn->kn_obj)->f_data; |
|
solock(so); |
|
SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) |
|
so->so_snd.sb_flags &= ~SB_KNOTE; |
|
sounlock(so); |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_sowrite(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
int rv; |
|
|
|
so = ((file_t *)kn->kn_obj)->f_data; |
|
if (hint != NOTE_SUBMIT) |
|
solock(so); |
|
kn->kn_data = sbspace(&so->so_snd); |
|
if (so->so_state & SS_CANTSENDMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
rv = 1; |
|
} else if (so->so_error) /* temporary udp error */ |
|
rv = 1; |
|
else if (((so->so_state & SS_ISCONNECTED) == 0) && |
|
(so->so_proto->pr_flags & PR_CONNREQUIRED)) |
|
rv = 0; |
|
else if (kn->kn_sfflags & NOTE_LOWAT) |
|
rv = (kn->kn_data >= kn->kn_sdata); |
|
else |
|
rv = (kn->kn_data >= so->so_snd.sb_lowat); |
|
if (hint != NOTE_SUBMIT) |
|
sounlock(so); |
|
return rv; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_solisten(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
int rv; |
|
|
|
so = ((file_t *)kn->kn_obj)->f_data; |
|
|
|
/* |
|
* Set kn_data to number of incoming connections, not |
|
* counting partial (incomplete) connections. |
|
*/ |
|
if (hint != NOTE_SUBMIT) |
|
solock(so); |
|
kn->kn_data = so->so_qlen; |
|
rv = (kn->kn_data > 0); |
|
if (hint != NOTE_SUBMIT) |
|
sounlock(so); |
|
return rv; |
|
} |
|
|
|
static const struct filterops solisten_filtops = |
|
{ 1, NULL, filt_sordetach, filt_solisten }; |
|
static const struct filterops soread_filtops = |
|
{ 1, NULL, filt_sordetach, filt_soread }; |
|
static const struct filterops sowrite_filtops = |
|
{ 1, NULL, filt_sowdetach, filt_sowrite }; |
|
|
|
int |
|
soo_kqfilter(struct file *fp, struct knote *kn) |
|
{ |
|
struct socket *so; |
|
struct sockbuf *sb; |
|
|
|
so = ((file_t *)kn->kn_obj)->f_data; |
|
solock(so); |
|
switch (kn->kn_filter) { |
|
case EVFILT_READ: |
|
if (so->so_options & SO_ACCEPTCONN) |
|
kn->kn_fop = &solisten_filtops; |
|
else |
|
kn->kn_fop = &soread_filtops; |
|
sb = &so->so_rcv; |
|
break; |
|
case EVFILT_WRITE: |
|
kn->kn_fop = &sowrite_filtops; |
|
sb = &so->so_snd; |
|
break; |
|
default: |
|
sounlock(so); |
|
return (EINVAL); |
|
} |
|
SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext); |
|
sb->sb_flags |= SB_KNOTE; |
|
sounlock(so); |
|
return (0); |
|
} |
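/*
 * Example (sketch of the userland side, not part of this file): the
 * filters above are reached through kqueue(2)/kevent(2), e.g.
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * For a listening socket EVFILT_READ reports pending connections
 * (filt_solisten); otherwise it reports data ready to read (filt_soread).
 */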
|
|
|
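/*
 * sodopoll() computes which of the requested poll(2) events are
 * currently true for the socket; sopoll() below wraps it with the
 * socket lock and select/poll bookkeeping.
 */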
static int |
|
sodopoll(struct socket *so, int events) |
|
{ |
|
int revents; |
|
|
|
revents = 0; |
|
|
|
if (events & (POLLIN | POLLRDNORM)) |
|
if (soreadable(so)) |
|
revents |= events & (POLLIN | POLLRDNORM); |
|
|
|
if (events & (POLLOUT | POLLWRNORM)) |
|
if (sowritable(so)) |
|
revents |= events & (POLLOUT | POLLWRNORM); |
|
|
|
if (events & (POLLPRI | POLLRDBAND)) |
|
if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) |
|
revents |= events & (POLLPRI | POLLRDBAND); |
|
|
|
return revents; |
|
} |
|
|
|
int |
|
sopoll(struct socket *so, int events) |
|
{ |
|
int revents = 0; |
|
|
|
#ifndef DIAGNOSTIC |
|
/* |
|
* Do a quick, unlocked check in expectation that the socket |
|
* will be ready for I/O. Don't do this check if DIAGNOSTIC, |
|
* as the solocked() assertions will fail. |
|
*/ |
|
if ((revents = sodopoll(so, events)) != 0) |
|
return revents; |
|
#endif |
|
|
|
solock(so); |
|
if ((revents = sodopoll(so, events)) == 0) { |
|
if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { |
|
selrecord(curlwp, &so->so_rcv.sb_sel); |
|
so->so_rcv.sb_flags |= SB_NOTIFY; |
|
} |
|
|
|
if (events & (POLLOUT | POLLWRNORM)) { |
|
selrecord(curlwp, &so->so_snd.sb_sel); |
|
so->so_snd.sb_flags |= SB_NOTIFY; |
|
} |
|
} |
|
sounlock(so); |
|
|
|
return revents; |
|
} |
|
|
|
|
|
#include <sys/sysctl.h> |
|
|
|
static int sysctl_kern_somaxkva(SYSCTLFN_PROTO); |
|
|
|
/* |
|
* sysctl helper routine for kern.somaxkva. ensures that the given |
|
* value is not too small. |
|
* (XXX should we maybe make sure it's not too large as well?) |
|
*/ |
|
static int |
|
sysctl_kern_somaxkva(SYSCTLFN_ARGS) |
|
{ |
|
int error, new_somaxkva; |
|
struct sysctlnode node; |
|
|
|
new_somaxkva = somaxkva; |
|
node = *rnode; |
|
node.sysctl_data = &new_somaxkva; |
|
error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
|
if (error || newp == NULL) |
|
return (error); |
|
|
|
if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */ |
|
return (EINVAL); |
|
|
|
mutex_enter(&so_pendfree_lock); |
|
somaxkva = new_somaxkva; |
|
cv_broadcast(&socurkva_cv); |
|
mutex_exit(&so_pendfree_lock); |
|
|
|
return (error); |
}

static void
sysctl_kern_somaxkva_setup(void)
{

	KASSERT(socket_sysctllog == NULL);
	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "somaxkva",
		SYSCTL_DESCR("Maximum amount of kernel memory to be "
		    "used for socket buffers"),
		sysctl_kern_somaxkva, 0, NULL, 0,
		CTL_KERN, KERN_SOMAXKVA, CTL_EOL);
}
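/*
 * Example (sketch): once registered, the limit can be inspected or tuned
 * from userland with sysctl(8), e.g. "sysctl -w kern.somaxkva=25165824";
 * values below 16 MB are rejected by sysctl_kern_somaxkva() above.
 */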