version 1.44.6.2, 1999/07/06 11:02:39 |
version 1.107, 2004/09/03 18:14:09 |
|
|
/* $NetBSD$ */ |
/* $NetBSD$ */ |
|
|
|
/*- |
|
* Copyright (c) 2002 The NetBSD Foundation, Inc. |
|
* All rights reserved. |
|
* |
|
* This code is derived from software contributed to The NetBSD Foundation |
|
* by Jason R. Thorpe of Wasabi Systems, Inc. |
|
* |
|
* Redistribution and use in source and binary forms, with or without |
|
* modification, are permitted provided that the following conditions |
|
* are met: |
|
* 1. Redistributions of source code must retain the above copyright |
|
* notice, this list of conditions and the following disclaimer. |
|
* 2. Redistributions in binary form must reproduce the above copyright |
|
* notice, this list of conditions and the following disclaimer in the |
|
* documentation and/or other materials provided with the distribution. |
|
* 3. All advertising materials mentioning features or use of this software |
|
* must display the following acknowledgement: |
|
* This product includes software developed by the NetBSD |
|
* Foundation, Inc. and its contributors. |
|
* 4. Neither the name of The NetBSD Foundation nor the names of its |
|
* contributors may be used to endorse or promote products derived |
|
* from this software without specific prior written permission. |
|
* |
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
|
* POSSIBILITY OF SUCH DAMAGE. |
|
*/ |
|
|
/* |
/* |
* Copyright (c) 1982, 1986, 1988, 1990, 1993 |
* Copyright (c) 1982, 1986, 1988, 1990, 1993 |
* The Regents of the University of California. All rights reserved. |
* The Regents of the University of California. All rights reserved. |
|
|
* 2. Redistributions in binary form must reproduce the above copyright |
* 2. Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* documentation and/or other materials provided with the distribution. |
* 3. All advertising materials mentioning features or use of this software |
* 3. Neither the name of the University nor the names of its contributors |
* must display the following acknowledgement: |
|
* This product includes software developed by the University of |
|
* California, Berkeley and its contributors. |
|
* 4. Neither the name of the University nor the names of its contributors |
|
* may be used to endorse or promote products derived from this software |
* may be used to endorse or promote products derived from this software |
* without specific prior written permission. |
* without specific prior written permission. |
* |
* |
|
|
* @(#)uipc_socket.c 8.6 (Berkeley) 5/2/95 |
* @(#)uipc_socket.c 8.6 (Berkeley) 5/2/95 |
*/ |
*/ |
|
|
#include "opt_compat_sunos.h" |
#include <sys/cdefs.h> |
|
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#include "opt_sock_counters.h" |
|
#include "opt_sosend_loan.h" |
|
#include "opt_mbuftrace.h" |
|
#include "opt_somaxkva.h" |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
#include <sys/systm.h> |
#include <sys/systm.h> |
|
|
#include <sys/signalvar.h> |
#include <sys/signalvar.h> |
#include <sys/resourcevar.h> |
#include <sys/resourcevar.h> |
#include <sys/pool.h> |
#include <sys/pool.h> |
|
#include <sys/event.h> |
|
#include <sys/poll.h> |
|
|
|
#include <uvm/uvm.h> |
|
|
struct pool socket_pool; |
POOL_INIT(socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL); |
|
|
|
MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options"); |
|
MALLOC_DEFINE(M_SONAME, "soname", "socket name"); |
|
|
|
extern int somaxconn; /* patchable (XXX sysctl) */ |
|
int somaxconn = SOMAXCONN; |
|
|
|
#ifdef SOSEND_COUNTERS |
|
#include <sys/device.h> |
|
|
|
struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "loan big"); |
|
struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "copy big"); |
|
struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "copy small"); |
|
struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, |
|
NULL, "sosend", "kva limit"); |
|
|
|
#define SOSEND_COUNTER_INCR(ev) (ev)->ev_count++ |
|
|
|
EVCNT_ATTACH_STATIC(sosend_loan_big); |
|
EVCNT_ATTACH_STATIC(sosend_copy_big); |
|
EVCNT_ATTACH_STATIC(sosend_copy_small); |
|
EVCNT_ATTACH_STATIC(sosend_kvalimit); |
|
#else |
|
|
|
#define SOSEND_COUNTER_INCR(ev) /* nothing */ |
|
|
|
#endif /* SOSEND_COUNTERS */ |
|
|
void |
void |
soinit() |
soinit(void) |
{ |
{ |
|
|
pool_init(&socket_pool, sizeof(struct socket), 0, 0, 0, |
/* Set the initial adjusted socket buffer size. */ |
"sockpl", 0, NULL, NULL, M_SOCKET); |
if (sb_max_set(sb_max)) |
|
panic("bad initial sb_max value: %lu\n", sb_max); |
|
|
} |
} |
|
|
#ifdef KEY |
#ifdef SOSEND_NO_LOAN |
#include <netkey/key.h> |
int use_sosend_loan = 0; |
|
#else |
|
int use_sosend_loan = 1; |
#endif |
#endif |
|
|
|
struct simplelock so_pendfree_slock = SIMPLELOCK_INITIALIZER; |
|
struct mbuf *so_pendfree; |
|
|
|
#ifndef SOMAXKVA |
|
#define SOMAXKVA (16 * 1024 * 1024) |
|
#endif |
|
int somaxkva = SOMAXKVA; |
|
int socurkva; |
|
int sokvawaiters; |
|
|
|
#define SOCK_LOAN_THRESH 4096 |
|
#define SOCK_LOAN_CHUNK 65536 |
|
|
|
static size_t sodopendfree(struct socket *); |
|
static size_t sodopendfreel(struct socket *); |
|
static __inline vsize_t sokvareserve(struct socket *, vsize_t); |
|
static __inline void sokvaunreserve(vsize_t); |
|
|
|
/*
 * sokvareserve: reserve "len" bytes of loan kva against the global
 * somaxkva limit, sleeping (interruptibly) until space is available.
 *
 * Returns the number of bytes actually reserved ("len"), or 0 if the
 * sleep was interrupted by a signal.  Acquires so_pendfree_slock at
 * splvm for the duration of the accounting.
 */
static __inline vsize_t
sokvareserve(struct socket *so, vsize_t len)
{
	int s;
	int error;

	s = splvm();
	simple_lock(&so_pendfree_slock);
	while (socurkva + len > somaxkva) {
		size_t freed;

		/*
		 * try to do pendfree.
		 */

		freed = sodopendfreel(so);

		/*
		 * if some kva was freed, try again.
		 */

		if (freed)
			continue;

		/*
		 * Nothing could be freed; record the limit hit and sleep
		 * until soloanfree()/sokvaunreserve() wakes us.  PCATCH
		 * means a signal aborts the wait (error != 0 below).
		 * ltsleep drops and re-takes so_pendfree_slock.
		 */
		SOSEND_COUNTER_INCR(&sosend_kvalimit);
		sokvawaiters++;
		error = ltsleep(&socurkva, PVM | PCATCH, "sokva", 0,
		    &so_pendfree_slock);
		sokvawaiters--;
		if (error) {
			/* Interrupted: report zero bytes reserved. */
			len = 0;
			break;
		}
	}
	/* len is 0 on the error path, so this is a no-op there. */
	socurkva += len;
	simple_unlock(&so_pendfree_slock);
	splx(s);
	return len;
}
|
|
|
static __inline void |
|
sokvaunreserve(vsize_t len) |
|
{ |
|
int s; |
|
|
|
s = splvm(); |
|
simple_lock(&so_pendfree_slock); |
|
socurkva -= len; |
|
if (sokvawaiters) |
|
wakeup(&socurkva); |
|
simple_unlock(&so_pendfree_slock); |
|
splx(s); |
|
} |
|
|
|
/* |
|
* sokvaalloc: allocate kva for loan. |
|
*/ |
|
|
|
/*
 * sokvaalloc: allocate kva for loan.
 *
 * Reserves "len" bytes against the somaxkva accounting, then carves a
 * virtual address range out of kernel_map.  Returns the kva on success
 * or 0 on failure (reservation interrupted, or no kva available); the
 * reservation is released on the allocation-failure path.
 */

vaddr_t
sokvaalloc(vsize_t len, struct socket *so)
{
	vaddr_t lva;

	/*
	 * reserve kva.
	 */

	/* May sleep; 0 means the wait was interrupted by a signal. */
	if (sokvareserve(so, len) == 0)
		return 0;

	/*
	 * allocate kva.
	 */

	lva = uvm_km_valloc_wait(kernel_map, len);
	if (lva == 0) {
		/* Undo the accounting so other senders can proceed. */
		sokvaunreserve(len);
		return (0);
	}

	return lva;
}
|
|
|
/* |
|
* sokvafree: free kva for loan. |
|
*/ |
|
|
|
void |
|
sokvafree(vaddr_t sva, vsize_t len) |
|
{ |
|
|
|
/* |
|
* free kva. |
|
*/ |
|
|
|
uvm_km_free(kernel_map, sva, len); |
|
|
|
/* |
|
* unreserve kva. |
|
*/ |
|
|
|
sokvaunreserve(len); |
|
} |
|
|
|
/*
 * sodoloanfree: tear down one page loan.
 *
 * "buf"/"size" describe the loaned region in loan kva; "pgs" is the
 * array of loaned vm_pages, or NULL, in which case the pages are
 * recovered from the pmap before the mappings are removed.  Unmaps the
 * range, returns the pages to UVM, and frees the kva.
 */
static void
sodoloanfree(struct vm_page **pgs, caddr_t buf, size_t size)
{
	vaddr_t va, sva, eva;
	vsize_t len;
	paddr_t pa;
	int i, npgs;

	/* Round the region out to whole pages. */
	eva = round_page((vaddr_t) buf + size);
	sva = trunc_page((vaddr_t) buf);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	if (__predict_false(pgs == NULL)) {
		/*
		 * No page array supplied: rebuild it on the stack by
		 * asking the kernel pmap for each page's physical
		 * address.  npgs is bounded by the loan chunk size, so
		 * the alloca is small.
		 */
		pgs = alloca(npgs * sizeof(*pgs));

		for (i = 0, va = sva; va < eva; i++, va += PAGE_SIZE) {
			if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
				panic("sodoloanfree: va 0x%lx not mapped", va);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
		}
	}

	/* Remove the mappings before the pages go back to UVM. */
	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
	uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE);
	sokvafree(sva, len);
}
|
|
|
static size_t |
|
sodopendfree(struct socket *so) |
|
{ |
|
int s; |
|
size_t rv; |
|
|
|
s = splvm(); |
|
simple_lock(&so_pendfree_slock); |
|
rv = sodopendfreel(so); |
|
simple_unlock(&so_pendfree_slock); |
|
splx(s); |
|
|
|
return rv; |
|
} |
|
|
|
/* |
|
* sodopendfreel: free mbufs on "pendfree" list. |
|
* unlock and relock so_pendfree_slock when freeing mbufs. |
|
* |
|
* => called with so_pendfree_slock held. |
|
* => called at splvm. |
|
*/ |
|
|
|
/*
 * sodopendfreel: free mbufs on "pendfree" list.
 * unlock and relock so_pendfree_slock when freeing mbufs.
 *
 * => called with so_pendfree_slock held.
 * => called at splvm.
 *
 * Returns the total number of ext-storage bytes released.  The list is
 * detached under the lock, then freed with the lock dropped, because
 * sodoloanfree() manipulates kernel_map and must not run with the
 * spinlock held.
 */

static size_t
sodopendfreel(struct socket *so)
{
	size_t rv = 0;

	LOCK_ASSERT(simple_lock_held(&so_pendfree_slock));

	for (;;) {
		struct mbuf *m;
		struct mbuf *next;

		/* Grab the whole list atomically; retry until empty. */
		m = so_pendfree;
		if (m == NULL)
			break;
		so_pendfree = NULL;
		simple_unlock(&so_pendfree_slock);
		/* XXX splx */

		for (; m != NULL; m = next) {
			next = m->m_next;

			rv += m->m_ext.ext_size;
			/* Pass the page array only when one is attached. */
			sodoloanfree((m->m_flags & M_EXT_PAGES) ?
			    m->m_ext.ext_pgs : NULL, m->m_ext.ext_buf,
			    m->m_ext.ext_size);
			pool_cache_put(&mbpool_cache, m);
		}

		/* XXX splvm */
		/* Re-take the lock; more entries may have queued meanwhile. */
		simple_lock(&so_pendfree_slock);
	}

	return (rv);
}
|
|
|
/*
 * soloanfree: ext-storage free callback for loaned mbufs.
 *
 * With m == NULL (the MEXTREMOVE case) the loan is torn down
 * immediately.  Otherwise the mbuf is queued on so_pendfree for
 * sodopendfree()/sodopendfreel() to process later, because this
 * callback may run in interrupt context where kernel_map cannot be
 * touched.  "arg" (the socket) is unused here.
 */
void
soloanfree(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
	int s;

	if (m == NULL) {

		/*
		 * called from MEXTREMOVE.
		 */

		sodoloanfree(NULL, buf, size);
		return;
	}

	/*
	 * postpone freeing mbuf.
	 *
	 * we can't do it in interrupt context
	 * because we need to put kva back to kernel_map.
	 */

	s = splvm();
	simple_lock(&so_pendfree_slock);
	/* Push onto the singly-linked pendfree list via m_next. */
	m->m_next = so_pendfree;
	so_pendfree = m;
	/* A sender blocked on kva can make progress once this is freed. */
	if (sokvawaiters)
		wakeup(&socurkva);
	simple_unlock(&so_pendfree_slock);
	splx(s);
}
|
|
|
/*
 * sosend_loan: zero-copy send setup.
 *
 * Loans up to "space" bytes (capped by the first iovec and
 * SOCK_LOAN_CHUNK) of the user's buffer into kernel kva and attaches
 * the mapping to mbuf "m" as read-only external storage.  Advances the
 * uio past the loaned bytes.  Returns the number of bytes loaned, or 0
 * when loaning is not possible (non-user uio, no kva, uvm_loan
 * failure), in which case the caller falls back to copying.
 */
static long
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space)
{
	struct iovec *iov = uio->uio_iov;
	vaddr_t sva, eva;
	vsize_t len;
	vaddr_t lva, va;
	int npgs, i, error;

	/* Can only loan pages that belong to a user address space. */
	if (uio->uio_segflg != UIO_USERSPACE)
		return (0);

	if (iov->iov_len < (size_t) space)
		space = iov->iov_len;
	if (space > SOCK_LOAN_CHUNK)
		space = SOCK_LOAN_CHUNK;

	/* Page-align the user region and count the pages involved. */
	eva = round_page((vaddr_t) iov->iov_base + space);
	sva = trunc_page((vaddr_t) iov->iov_base);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	/* XXX KDASSERT */
	KASSERT(npgs <= M_EXT_MAXPAGES);
	KASSERT(uio->uio_procp != NULL);

	lva = sokvaalloc(len, so);
	if (lva == 0)
		return 0;

	/* Loan the user's pages into m's ext page array. */
	error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, sva, len,
	    m->m_ext.ext_pgs, UVM_LOAN_TOPAGE);
	if (error) {
		sokvafree(lva, len);
		return (0);
	}

	/* Map the loaned pages read-only at the reserved kva. */
	for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE)
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]),
		    VM_PROT_READ);
	pmap_update(pmap_kernel());

	/* Re-apply the sub-page offset of the user's buffer. */
	lva += (vaddr_t) iov->iov_base & PAGE_MASK;

	/* soloanfree() will undo the loan when the mbuf is released. */
	MEXTADD(m, (caddr_t) lva, space, M_MBUF, soloanfree, so);
	m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;

	/* Consume the loaned bytes from the uio. */
	uio->uio_resid -= space;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (caddr_t) uio->uio_iov->iov_base + space;
	uio->uio_iov->iov_len -= space;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (space);
}
|
|
/* |
/* |
* Socket operation routines. |
* Socket operation routines. |
* These routines are called by the routines in |
* These routines are called by the routines in |
|
|
*/ |
*/ |
/*ARGSUSED*/ |
/*ARGSUSED*/ |
int |
int |
socreate(dom, aso, type, proto) |
socreate(int dom, struct socket **aso, int type, int proto, struct proc *p) |
int dom; |
{ |
struct socket **aso; |
const struct protosw *prp; |
register int type; |
struct socket *so; |
int proto; |
int error, s; |
{ |
|
struct proc *p = curproc; /* XXX */ |
|
register struct protosw *prp; |
|
register struct socket *so; |
|
register int error; |
|
int s; |
|
|
|
if (proto) |
if (proto) |
prp = pffindproto(dom, proto, type); |
prp = pffindproto(dom, proto, type); |
Line 104 socreate(dom, aso, type, proto) |
|
Line 474 socreate(dom, aso, type, proto) |
|
so->so_proto = prp; |
so->so_proto = prp; |
so->so_send = sosend; |
so->so_send = sosend; |
so->so_receive = soreceive; |
so->so_receive = soreceive; |
|
#ifdef MBUFTRACE |
|
so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner; |
|
so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner; |
|
so->so_mowner = &prp->pr_domain->dom_mowner; |
|
#endif |
if (p != 0) |
if (p != 0) |
so->so_uid = p->p_ucred->cr_uid; |
so->so_uid = p->p_ucred->cr_uid; |
|
else |
|
so->so_uid = UID_MAX; |
error = (*prp->pr_usrreq)(so, PRU_ATTACH, (struct mbuf *)0, |
error = (*prp->pr_usrreq)(so, PRU_ATTACH, (struct mbuf *)0, |
(struct mbuf *)(long)proto, (struct mbuf *)0, p); |
(struct mbuf *)(long)proto, (struct mbuf *)0, p); |
if (error) { |
if (error) { |
Line 114 socreate(dom, aso, type, proto) |
|
Line 491 socreate(dom, aso, type, proto) |
|
splx(s); |
splx(s); |
return (error); |
return (error); |
} |
} |
#ifdef COMPAT_SUNOS |
|
{ |
|
extern struct emul emul_sunos; |
|
if (p->p_emul == &emul_sunos && type == SOCK_DGRAM) |
|
so->so_options |= SO_BROADCAST; |
|
} |
|
#endif |
|
splx(s); |
splx(s); |
*aso = so; |
*aso = so; |
return (0); |
return (0); |
} |
} |
|
|
int |
int |
sobind(so, nam) |
sobind(struct socket *so, struct mbuf *nam, struct proc *p) |
struct socket *so; |
|
struct mbuf *nam; |
|
{ |
{ |
struct proc *p = curproc; /* XXX */ |
int s, error; |
int s = splsoftnet(); |
|
int error; |
|
|
|
|
s = splsoftnet(); |
error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, (struct mbuf *)0, |
error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, (struct mbuf *)0, |
nam, (struct mbuf *)0, p); |
nam, (struct mbuf *)0, p); |
splx(s); |
splx(s); |
|
|
} |
} |
|
|
int |
int |
solisten(so, backlog) |
solisten(struct socket *so, int backlog) |
register struct socket *so; |
|
int backlog; |
|
{ |
{ |
int s = splsoftnet(), error; |
int s, error; |
|
|
|
s = splsoftnet(); |
error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, (struct mbuf *)0, |
error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, (struct mbuf *)0, |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
if (error) { |
if (error) { |
splx(s); |
splx(s); |
return (error); |
return (error); |
} |
} |
if (so->so_q.tqh_first == NULL) |
if (TAILQ_EMPTY(&so->so_q)) |
so->so_options |= SO_ACCEPTCONN; |
so->so_options |= SO_ACCEPTCONN; |
if (backlog < 0) |
if (backlog < 0) |
backlog = 0; |
backlog = 0; |
so->so_qlimit = min(backlog, SOMAXCONN); |
so->so_qlimit = min(backlog, somaxconn); |
splx(s); |
splx(s); |
return (0); |
return (0); |
} |
} |
|
|
void |
void |
sofree(so) |
sofree(struct socket *so) |
register struct socket *so; |
|
{ |
{ |
|
|
if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) |
if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) |
|
|
if (!soqremque(so, 0)) |
if (!soqremque(so, 0)) |
return; |
return; |
} |
} |
sbrelease(&so->so_snd); |
if (so->so_rcv.sb_hiwat) |
|
(void)chgsbsize(so->so_uid, &so->so_rcv.sb_hiwat, 0, |
|
RLIM_INFINITY); |
|
if (so->so_snd.sb_hiwat) |
|
(void)chgsbsize(so->so_uid, &so->so_snd.sb_hiwat, 0, |
|
RLIM_INFINITY); |
|
sbrelease(&so->so_snd, so); |
sorflush(so); |
sorflush(so); |
pool_put(&socket_pool, so); |
pool_put(&socket_pool, so); |
} |
} |
|
|
* Free socket when disconnect complete. |
* Free socket when disconnect complete. |
*/ |
*/ |
int |
int |
soclose(so) |
soclose(struct socket *so) |
register struct socket *so; |
|
{ |
{ |
struct socket *so2; |
struct socket *so2; |
int s = splsoftnet(); /* conservative */ |
int s, error; |
int error = 0; |
|
|
|
|
error = 0; |
|
s = splsoftnet(); /* conservative */ |
if (so->so_options & SO_ACCEPTCONN) { |
if (so->so_options & SO_ACCEPTCONN) { |
while ((so2 = so->so_q0.tqh_first) != 0) { |
while ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) { |
(void) soqremque(so2, 0); |
(void) soqremque(so2, 0); |
(void) soabort(so2); |
(void) soabort(so2); |
} |
} |
while ((so2 = so->so_q.tqh_first) != 0) { |
while ((so2 = TAILQ_FIRST(&so->so_q)) != 0) { |
(void) soqremque(so2, 1); |
(void) soqremque(so2, 1); |
(void) soabort(so2); |
(void) soabort(so2); |
} |
} |
|
|
} |
} |
} |
} |
} |
} |
drop: |
drop: |
if (so->so_pcb) { |
if (so->so_pcb) { |
int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, |
int error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, |
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0, |
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0, |
|
|
if (error == 0) |
if (error == 0) |
error = error2; |
error = error2; |
} |
} |
discard: |
discard: |
if (so->so_state & SS_NOFDREF) |
if (so->so_state & SS_NOFDREF) |
panic("soclose: NOFDREF"); |
panic("soclose: NOFDREF"); |
so->so_state |= SS_NOFDREF; |
so->so_state |= SS_NOFDREF; |
|
|
* Must be called at splsoftnet... |
* Must be called at splsoftnet... |
*/ |
*/ |
int |
int |
soabort(so) |
soabort(struct socket *so) |
struct socket *so; |
|
{ |
{ |
|
|
return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, (struct mbuf *)0, |
return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, (struct mbuf *)0, |
|
|
} |
} |
|
|
int |
int |
soaccept(so, nam) |
soaccept(struct socket *so, struct mbuf *nam) |
register struct socket *so; |
|
struct mbuf *nam; |
|
{ |
{ |
int s = splsoftnet(); |
int s, error; |
int error; |
|
|
|
|
error = 0; |
|
s = splsoftnet(); |
if ((so->so_state & SS_NOFDREF) == 0) |
if ((so->so_state & SS_NOFDREF) == 0) |
panic("soaccept: !NOFDREF"); |
panic("soaccept: !NOFDREF"); |
so->so_state &= ~SS_NOFDREF; |
so->so_state &= ~SS_NOFDREF; |
if ((so->so_state & SS_ISDISCONNECTED) == 0) |
if ((so->so_state & SS_ISDISCONNECTED) == 0 || |
|
(so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) |
error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, |
error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, |
(struct mbuf *)0, nam, (struct mbuf *)0, (struct proc *)0); |
(struct mbuf *)0, nam, (struct mbuf *)0, (struct proc *)0); |
else |
else |
error = 0; |
error = ECONNABORTED; |
|
|
splx(s); |
splx(s); |
return (error); |
return (error); |
} |
} |
|
|
int |
int |
soconnect(so, nam) |
soconnect(struct socket *so, struct mbuf *nam, struct proc *p) |
register struct socket *so; |
|
struct mbuf *nam; |
|
{ |
{ |
struct proc *p = curproc; /* XXX */ |
int s, error; |
int s; |
|
int error; |
|
|
|
if (so->so_options & SO_ACCEPTCONN) |
if (so->so_options & SO_ACCEPTCONN) |
return (EOPNOTSUPP); |
return (EOPNOTSUPP); |
Line 307 soconnect(so, nam) |
|
Line 674 soconnect(so, nam) |
|
} |
} |
|
|
int |
int |
soconnect2(so1, so2) |
soconnect2(struct socket *so1, struct socket *so2) |
register struct socket *so1; |
|
struct socket *so2; |
|
{ |
{ |
int s = splsoftnet(); |
int s, error; |
int error; |
|
|
|
|
s = splsoftnet(); |
error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, |
error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, |
(struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0, |
(struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0, |
(struct proc *)0); |
(struct proc *)0); |
Line 322 soconnect2(so1, so2) |
|
Line 687 soconnect2(so1, so2) |
|
} |
} |
|
|
int |
int |
sodisconnect(so) |
sodisconnect(struct socket *so) |
register struct socket *so; |
|
{ |
{ |
int s = splsoftnet(); |
int s, error; |
int error; |
|
|
|
|
s = splsoftnet(); |
if ((so->so_state & SS_ISCONNECTED) == 0) { |
if ((so->so_state & SS_ISCONNECTED) == 0) { |
error = ENOTCONN; |
error = ENOTCONN; |
goto bad; |
goto bad; |
Line 339 sodisconnect(so) |
|
Line 703 sodisconnect(so) |
|
error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, |
error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, |
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0, |
(struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0, |
(struct proc *)0); |
(struct proc *)0); |
bad: |
bad: |
splx(s); |
splx(s); |
|
sodopendfree(so); |
return (error); |
return (error); |
} |
} |
|
|
|
|
* Data and control buffers are freed on return. |
* Data and control buffers are freed on return. |
*/ |
*/ |
int |
int |
sosend(so, addr, uio, top, control, flags) |
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, |
register struct socket *so; |
struct mbuf *control, int flags, struct proc *p) |
struct mbuf *addr; |
{ |
struct uio *uio; |
struct mbuf **mp, *m; |
struct mbuf *top; |
long space, len, resid, clen, mlen; |
struct mbuf *control; |
int error, s, dontroute, atomic; |
int flags; |
|
{ |
sodopendfree(so); |
struct proc *p = curproc; /* XXX */ |
|
struct mbuf **mp; |
|
register struct mbuf *m; |
|
register long space, len, resid; |
|
int clen = 0, error, s, dontroute, mlen; |
|
int atomic = sosendallatonce(so) || top; |
|
|
|
|
clen = 0; |
|
atomic = sosendallatonce(so) || top; |
if (uio) |
if (uio) |
resid = uio->uio_resid; |
resid = uio->uio_resid; |
else |
else |
Line 396 sosend(so, addr, uio, top, control, flag |
|
Line 757 sosend(so, addr, uio, top, control, flag |
|
dontroute = |
dontroute = |
(flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && |
(flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && |
(so->so_proto->pr_flags & PR_ATOMIC); |
(so->so_proto->pr_flags & PR_ATOMIC); |
p->p_stats->p_ru.ru_msgsnd++; |
if (p) |
|
p->p_stats->p_ru.ru_msgsnd++; |
if (control) |
if (control) |
clen = control->m_len; |
clen = control->m_len; |
#define snderr(errno) { error = errno; splx(s); goto release; } |
#define snderr(errno) { error = errno; splx(s); goto release; } |
|
|
restart: |
restart: |
if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0) |
if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0) |
goto out; |
goto out; |
do { |
do { |
s = splsoftnet(); |
s = splsoftnet(); |
if (so->so_state & SS_CANTSENDMORE) |
if (so->so_state & SS_CANTSENDMORE) |
snderr(EPIPE); |
snderr(EPIPE); |
if (so->so_error) |
if (so->so_error) { |
snderr(so->so_error); |
error = so->so_error; |
|
so->so_error = 0; |
|
splx(s); |
|
goto release; |
|
} |
if ((so->so_state & SS_ISCONNECTED) == 0) { |
if ((so->so_state & SS_ISCONNECTED) == 0) { |
if (so->so_proto->pr_flags & PR_CONNREQUIRED) { |
if (so->so_proto->pr_flags & PR_CONNREQUIRED) { |
if ((so->so_state & SS_ISCONFIRMING) == 0 && |
if ((so->so_state & SS_ISCONFIRMING) == 0 && |
|
|
if ((atomic && resid > so->so_snd.sb_hiwat) || |
if ((atomic && resid > so->so_snd.sb_hiwat) || |
clen > so->so_snd.sb_hiwat) |
clen > so->so_snd.sb_hiwat) |
snderr(EMSGSIZE); |
snderr(EMSGSIZE); |
if (space < resid + clen && uio && |
if (space < resid + clen && |
(atomic || space < so->so_snd.sb_lowat || space < clen)) { |
(atomic || space < so->so_snd.sb_lowat || space < clen)) { |
if (so->so_state & SS_NBIO) |
if (so->so_state & SS_NBIO) |
snderr(EWOULDBLOCK); |
snderr(EWOULDBLOCK); |
|
|
mp = ⊤ |
mp = ⊤ |
space -= clen; |
space -= clen; |
do { |
do { |
if (uio == NULL) { |
if (uio == NULL) { |
/* |
|
* Data is prepackaged in "top". |
|
*/ |
|
resid = 0; |
|
if (flags & MSG_EOR) |
|
top->m_flags |= M_EOR; |
|
} else do { |
|
if (top == 0) { |
|
MGETHDR(m, M_WAIT, MT_DATA); |
|
mlen = MHLEN; |
|
m->m_pkthdr.len = 0; |
|
m->m_pkthdr.rcvif = (struct ifnet *)0; |
|
} else { |
|
MGET(m, M_WAIT, MT_DATA); |
|
mlen = MLEN; |
|
} |
|
if (resid >= MINCLSIZE && space >= MCLBYTES) { |
|
MCLGET(m, M_WAIT); |
|
if ((m->m_flags & M_EXT) == 0) |
|
goto nopages; |
|
mlen = MCLBYTES; |
|
#ifdef MAPPED_MBUFS |
|
len = min(MCLBYTES, resid); |
|
#else |
|
if (atomic && top == 0) { |
|
len = min(MCLBYTES - max_hdr, resid); |
|
m->m_data += max_hdr; |
|
} else |
|
len = min(MCLBYTES, resid); |
|
#endif |
|
space -= len; |
|
} else { |
|
nopages: |
|
len = min(min(mlen, resid), space); |
|
space -= len; |
|
/* |
/* |
* For datagram protocols, leave room |
* Data is prepackaged in "top". |
* for protocol headers in first mbuf. |
|
*/ |
*/ |
if (atomic && top == 0 && len < mlen) |
resid = 0; |
MH_ALIGN(m, len); |
|
} |
|
error = uiomove(mtod(m, caddr_t), (int)len, uio); |
|
resid = uio->uio_resid; |
|
m->m_len = len; |
|
*mp = m; |
|
top->m_pkthdr.len += len; |
|
if (error) |
|
goto release; |
|
mp = &m->m_next; |
|
if (resid <= 0) { |
|
if (flags & MSG_EOR) |
if (flags & MSG_EOR) |
top->m_flags |= M_EOR; |
top->m_flags |= M_EOR; |
break; |
} else do { |
} |
if (top == 0) { |
} while (space > 0 && atomic); |
m = m_gethdr(M_WAIT, MT_DATA); |
if (dontroute) |
mlen = MHLEN; |
so->so_options |= SO_DONTROUTE; |
m->m_pkthdr.len = 0; |
if (resid > 0) |
m->m_pkthdr.rcvif = (struct ifnet *)0; |
so->so_state |= SS_MORETOCOME; |
} else { |
s = splsoftnet(); /* XXX */ |
m = m_get(M_WAIT, MT_DATA); |
error = (*so->so_proto->pr_usrreq)(so, |
mlen = MLEN; |
(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, |
} |
top, addr, control, p); |
MCLAIM(m, so->so_snd.sb_mowner); |
splx(s); |
if (use_sosend_loan && |
if (dontroute) |
uio->uio_iov->iov_len >= SOCK_LOAN_THRESH && |
so->so_options &= ~SO_DONTROUTE; |
space >= SOCK_LOAN_THRESH && |
if (resid > 0) |
(len = sosend_loan(so, uio, m, |
so->so_state &= ~SS_MORETOCOME; |
space)) != 0) { |
clen = 0; |
SOSEND_COUNTER_INCR(&sosend_loan_big); |
control = 0; |
space -= len; |
top = 0; |
goto have_data; |
mp = ⊤ |
} |
if (error) |
if (resid >= MINCLSIZE && space >= MCLBYTES) { |
goto release; |
SOSEND_COUNTER_INCR(&sosend_copy_big); |
|
m_clget(m, M_WAIT); |
|
if ((m->m_flags & M_EXT) == 0) |
|
goto nopages; |
|
mlen = MCLBYTES; |
|
if (atomic && top == 0) { |
|
len = lmin(MCLBYTES - max_hdr, |
|
resid); |
|
m->m_data += max_hdr; |
|
} else |
|
len = lmin(MCLBYTES, resid); |
|
space -= len; |
|
} else { |
|
nopages: |
|
SOSEND_COUNTER_INCR(&sosend_copy_small); |
|
len = lmin(lmin(mlen, resid), space); |
|
space -= len; |
|
/* |
|
* For datagram protocols, leave room |
|
* for protocol headers in first mbuf. |
|
*/ |
|
if (atomic && top == 0 && len < mlen) |
|
MH_ALIGN(m, len); |
|
} |
|
error = uiomove(mtod(m, caddr_t), (int)len, |
|
uio); |
|
have_data: |
|
resid = uio->uio_resid; |
|
m->m_len = len; |
|
*mp = m; |
|
top->m_pkthdr.len += len; |
|
if (error) |
|
goto release; |
|
mp = &m->m_next; |
|
if (resid <= 0) { |
|
if (flags & MSG_EOR) |
|
top->m_flags |= M_EOR; |
|
break; |
|
} |
|
} while (space > 0 && atomic); |
|
|
|
s = splsoftnet(); |
|
|
|
if (so->so_state & SS_CANTSENDMORE) |
|
snderr(EPIPE); |
|
|
|
if (dontroute) |
|
so->so_options |= SO_DONTROUTE; |
|
if (resid > 0) |
|
so->so_state |= SS_MORETOCOME; |
|
error = (*so->so_proto->pr_usrreq)(so, |
|
(flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, |
|
top, addr, control, p); |
|
if (dontroute) |
|
so->so_options &= ~SO_DONTROUTE; |
|
if (resid > 0) |
|
so->so_state &= ~SS_MORETOCOME; |
|
splx(s); |
|
|
|
clen = 0; |
|
control = 0; |
|
top = 0; |
|
mp = ⊤ |
|
if (error) |
|
goto release; |
} while (resid && space > 0); |
} while (resid && space > 0); |
} while (resid); |
} while (resid); |
|
|
release: |
release: |
sbunlock(&so->so_snd); |
sbunlock(&so->so_snd); |
out: |
out: |
if (top) |
if (top) |
m_freem(top); |
m_freem(top); |
if (control) |
if (control) |
|
|
* only for the count in uio_resid. |
* only for the count in uio_resid. |
*/ |
*/ |
int |
int |
soreceive(so, paddr, uio, mp0, controlp, flagsp) |
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, |
register struct socket *so; |
struct mbuf **mp0, struct mbuf **controlp, int *flagsp) |
struct mbuf **paddr; |
{ |
struct uio *uio; |
struct proc * p; |
struct mbuf **mp0; |
struct mbuf *m, **mp; |
struct mbuf **controlp; |
int flags, len, error, s, offset, moff, type, orig_resid; |
int *flagsp; |
const struct protosw *pr; |
{ |
struct mbuf *nextrecord; |
register struct mbuf *m, **mp; |
int mbuf_removed = 0; |
register int flags, len, error, s, offset; |
|
struct protosw *pr = so->so_proto; |
|
struct mbuf *nextrecord; |
|
int moff, type = 0; |
|
int orig_resid = uio->uio_resid; |
|
|
|
|
pr = so->so_proto; |
mp = mp0; |
mp = mp0; |
|
type = 0; |
|
orig_resid = uio->uio_resid; |
|
p = uio->uio_procp; |
|
|
if (paddr) |
if (paddr) |
*paddr = 0; |
*paddr = 0; |
if (controlp) |
if (controlp) |
Line 569 soreceive(so, paddr, uio, mp0, controlp, |
|
Line 952 soreceive(so, paddr, uio, mp0, controlp, |
|
flags = *flagsp &~ MSG_EOR; |
flags = *flagsp &~ MSG_EOR; |
else |
else |
flags = 0; |
flags = 0; |
|
|
|
if ((flags & MSG_DONTWAIT) == 0) |
|
sodopendfree(so); |
|
|
if (flags & MSG_OOB) { |
if (flags & MSG_OOB) { |
m = m_get(M_WAIT, MT_DATA); |
m = m_get(M_WAIT, MT_DATA); |
error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, |
error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, |
(struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0, |
(struct mbuf *)(long)(flags & MSG_PEEK), |
(struct proc *)0); |
(struct mbuf *)0, p); |
if (error) |
if (error) |
goto bad; |
goto bad; |
do { |
do { |
Line 581 soreceive(so, paddr, uio, mp0, controlp, |
|
Line 968 soreceive(so, paddr, uio, mp0, controlp, |
|
(int) min(uio->uio_resid, m->m_len), uio); |
(int) min(uio->uio_resid, m->m_len), uio); |
m = m_free(m); |
m = m_free(m); |
} while (uio->uio_resid && error == 0 && m); |
} while (uio->uio_resid && error == 0 && m); |
bad: |
bad: |
if (m) |
if (m) |
m_freem(m); |
m_freem(m); |
return (error); |
return (error); |
|
|
*mp = (struct mbuf *)0; |
*mp = (struct mbuf *)0; |
if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) |
if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(struct mbuf *)0, (struct mbuf *)0, (struct proc *)0); |
(struct mbuf *)0, (struct mbuf *)0, p); |
|
|
restart: |
restart: |
if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) |
if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) |
return (error); |
return (error); |
s = splsoftnet(); |
s = splsoftnet(); |
|
|
error = EWOULDBLOCK; |
error = EWOULDBLOCK; |
goto release; |
goto release; |
} |
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
splx(s); |
splx(s); |
|
|
return (error); |
return (error); |
goto restart; |
goto restart; |
} |
} |
dontblock: |
dontblock: |
#ifdef notyet /* XXXX */ |
/* |
if (uio->uio_procp) |
* On entry here, m points to the first record of the socket buffer. |
uio->uio_procp->p_stats->p_ru.ru_msgrcv++; |
* While we process the initial mbufs containing address and control |
#endif |
* info, we save a copy of m->m_nextpkt into nextrecord. |
|
*/ |
|
if (p) |
|
p->p_stats->p_ru.ru_msgrcv++; |
|
KASSERT(m == so->so_rcv.sb_mb); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); |
nextrecord = m->m_nextpkt; |
nextrecord = m->m_nextpkt; |
if (pr->pr_flags & PR_ADDR) { |
if (pr->pr_flags & PR_ADDR) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
|
|
m = m->m_next; |
m = m->m_next; |
} else { |
} else { |
sbfree(&so->so_rcv, m); |
sbfree(&so->so_rcv, m); |
|
mbuf_removed = 1; |
if (paddr) { |
if (paddr) { |
*paddr = m; |
*paddr = m; |
so->so_rcv.sb_mb = m->m_next; |
so->so_rcv.sb_mb = m->m_next; |
|
|
m = m->m_next; |
m = m->m_next; |
} else { |
} else { |
sbfree(&so->so_rcv, m); |
sbfree(&so->so_rcv, m); |
|
mbuf_removed = 1; |
if (controlp) { |
if (controlp) { |
if (pr->pr_domain->dom_externalize && |
struct domain *dom = pr->pr_domain; |
|
if (dom->dom_externalize && p && |
mtod(m, struct cmsghdr *)->cmsg_type == |
mtod(m, struct cmsghdr *)->cmsg_type == |
SCM_RIGHTS) |
SCM_RIGHTS) |
error = (*pr->pr_domain->dom_externalize)(m); |
error = (*dom->dom_externalize)(m, p); |
*controlp = m; |
*controlp = m; |
so->so_rcv.sb_mb = m->m_next; |
so->so_rcv.sb_mb = m->m_next; |
m->m_next = 0; |
m->m_next = 0; |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} else { |
} else { |
|
/* |
|
* Dispose of any SCM_RIGHTS message that went |
|
* through the read path rather than recv. |
|
*/ |
|
if (pr->pr_domain->dom_dispose && |
|
mtod(m, struct cmsghdr *)->cmsg_type == SCM_RIGHTS) |
|
(*pr->pr_domain->dom_dispose)(m); |
MFREE(m, so->so_rcv.sb_mb); |
MFREE(m, so->so_rcv.sb_mb); |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} |
} |
|
|
controlp = &(*controlp)->m_next; |
controlp = &(*controlp)->m_next; |
} |
} |
} |
} |
|
|
|
/* |
|
* If m is non-NULL, we have some data to read. From now on, |
|
* make sure to keep sb_lastrecord consistent when working on |
|
* the last packet on the chain (nextrecord == NULL) and we |
|
* change m->m_nextpkt. |
|
*/ |
if (m) { |
if (m) { |
if ((flags & MSG_PEEK) == 0) |
if ((flags & MSG_PEEK) == 0) { |
m->m_nextpkt = nextrecord; |
m->m_nextpkt = nextrecord; |
|
/* |
|
* If nextrecord == NULL (this is a single chain), |
|
* then sb_lastrecord may not be valid here if m |
|
* was changed earlier. |
|
*/ |
|
if (nextrecord == NULL) { |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
so->so_rcv.sb_lastrecord = m; |
|
} |
|
} |
type = m->m_type; |
type = m->m_type; |
if (type == MT_OOBDATA) |
if (type == MT_OOBDATA) |
flags |= MSG_OOB; |
flags |= MSG_OOB; |
|
} else { |
|
if ((flags & MSG_PEEK) == 0) { |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
so->so_rcv.sb_mb = nextrecord; |
|
SB_EMPTY_FIXUP(&so->so_rcv); |
|
} |
} |
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); |
|
|
moff = 0; |
moff = 0; |
offset = 0; |
offset = 0; |
while (m && uio->uio_resid > 0 && error == 0) { |
while (m && uio->uio_resid > 0 && error == 0) { |
|
|
* block interrupts again. |
* block interrupts again. |
*/ |
*/ |
if (mp == 0) { |
if (mp == 0) { |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); |
splx(s); |
splx(s); |
error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); |
error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); |
s = splsoftnet(); |
s = splsoftnet(); |
|
if (error) { |
|
/* |
|
* If any part of the record has been removed |
|
* (such as the MT_SONAME mbuf, which will |
|
* happen when PR_ADDR, and thus also |
|
* PR_ATOMIC, is set), then drop the entire |
|
* record to maintain the atomicity of the |
|
* receive operation. |
|
* |
|
* This avoids a later panic("receive 1a") |
|
* when compiled with DIAGNOSTIC. |
|
*/ |
|
if (m && mbuf_removed |
|
&& (pr->pr_flags & PR_ATOMIC)) |
|
(void) sbdroprecord(&so->so_rcv); |
|
|
|
goto release; |
|
} |
} else |
} else |
uio->uio_resid -= len; |
uio->uio_resid -= len; |
if (len == m->m_len - moff) { |
if (len == m->m_len - moff) { |
|
|
MFREE(m, so->so_rcv.sb_mb); |
MFREE(m, so->so_rcv.sb_mb); |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} |
} |
if (m) |
/* |
|
* If m != NULL, we also know that |
|
* so->so_rcv.sb_mb != NULL. |
|
*/ |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
if (m) { |
m->m_nextpkt = nextrecord; |
m->m_nextpkt = nextrecord; |
|
if (nextrecord == NULL) |
|
so->so_rcv.sb_lastrecord = m; |
|
} else { |
|
so->so_rcv.sb_mb = nextrecord; |
|
SB_EMPTY_FIXUP(&so->so_rcv); |
|
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); |
} |
} |
} else { |
} else { |
if (flags & MSG_PEEK) |
if (flags & MSG_PEEK) |
|
|
!sosendallatonce(so) && !nextrecord) { |
!sosendallatonce(so) && !nextrecord) { |
if (so->so_error || so->so_state & SS_CANTRCVMORE) |
if (so->so_error || so->so_state & SS_CANTRCVMORE) |
break; |
break; |
|
/* |
|
* If we are peeking and the socket receive buffer is |
|
* full, stop since we can't get more data to peek at. |
|
*/ |
|
if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0) |
|
break; |
|
/* |
|
* If we've drained the socket buffer, tell the |
|
* protocol in case it needs to do something to |
|
* get it filled again. |
|
*/ |
|
if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) |
|
(*pr->pr_usrreq)(so, PRU_RCVD, |
|
(struct mbuf *)0, |
|
(struct mbuf *)(long)flags, |
|
(struct mbuf *)0, p); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); |
error = sbwait(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
if (error) { |
if (error) { |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
|
|
(void) sbdroprecord(&so->so_rcv); |
(void) sbdroprecord(&so->so_rcv); |
} |
} |
if ((flags & MSG_PEEK) == 0) { |
if ((flags & MSG_PEEK) == 0) { |
if (m == 0) |
if (m == 0) { |
|
/* |
|
* First part is an inline SB_EMPTY_FIXUP(). Second |
|
* part makes sure sb_lastrecord is up-to-date if |
|
* there is still data in the socket buffer. |
|
*/ |
so->so_rcv.sb_mb = nextrecord; |
so->so_rcv.sb_mb = nextrecord; |
|
if (so->so_rcv.sb_mb == NULL) { |
|
so->so_rcv.sb_mbtail = NULL; |
|
so->so_rcv.sb_lastrecord = NULL; |
|
} else if (nextrecord->m_nextpkt == NULL) |
|
so->so_rcv.sb_lastrecord = nextrecord; |
|
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); |
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) |
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(struct mbuf *)(long)flags, (struct mbuf *)0, |
(struct mbuf *)(long)flags, (struct mbuf *)0, p); |
(struct proc *)0); |
|
} |
} |
if (orig_resid == uio->uio_resid && orig_resid && |
if (orig_resid == uio->uio_resid && orig_resid && |
(flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { |
(flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { |
|
|
|
|
if (flagsp) |
if (flagsp) |
*flagsp |= flags; |
*flagsp |= flags; |
release: |
release: |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
splx(s); |
splx(s); |
return (error); |
return (error); |
} |
} |
|
|
int |
int |
soshutdown(so, how) |
soshutdown(struct socket *so, int how) |
struct socket *so; |
|
int how; |
|
{ |
{ |
struct protosw *pr = so->so_proto; |
const struct protosw *pr; |
|
|
|
pr = so->so_proto; |
if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) |
if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) |
return (EINVAL); |
return (EINVAL); |
|
|
Line 865 soshutdown(so, how) |
|
Line 1358 soshutdown(so, how) |
|
} |
} |
|
|
void |
void |
sorflush(so) |
sorflush(struct socket *so) |
register struct socket *so; |
|
{ |
{ |
register struct sockbuf *sb = &so->so_rcv; |
struct sockbuf *sb, asb; |
register struct protosw *pr = so->so_proto; |
const struct protosw *pr; |
register int s; |
int s; |
struct sockbuf asb; |
|
|
|
|
sb = &so->so_rcv; |
|
pr = so->so_proto; |
sb->sb_flags |= SB_NOINTR; |
sb->sb_flags |= SB_NOINTR; |
(void) sblock(sb, M_WAITOK); |
(void) sblock(sb, M_WAITOK); |
s = splimp(); |
s = splnet(); |
socantrcvmore(so); |
socantrcvmore(so); |
sbunlock(sb); |
sbunlock(sb); |
asb = *sb; |
asb = *sb; |
memset((caddr_t)sb, 0, sizeof(*sb)); |
/* |
|
* Clear most of the sockbuf structure, but leave some of the |
|
* fields valid. |
|
*/ |
|
memset(&sb->sb_startzero, 0, |
|
sizeof(*sb) - offsetof(struct sockbuf, sb_startzero)); |
splx(s); |
splx(s); |
if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) |
if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) |
(*pr->pr_domain->dom_dispose)(asb.sb_mb); |
(*pr->pr_domain->dom_dispose)(asb.sb_mb); |
sbrelease(&asb); |
sbrelease(&asb, so); |
} |
} |
|
|
int |
int |
sosetopt(so, level, optname, m0) |
sosetopt(struct socket *so, int level, int optname, struct mbuf *m0) |
register struct socket *so; |
|
int level, optname; |
|
struct mbuf *m0; |
|
{ |
{ |
int error = 0; |
int error; |
register struct mbuf *m = m0; |
struct mbuf *m; |
|
|
|
error = 0; |
|
m = m0; |
if (level != SOL_SOCKET) { |
if (level != SOL_SOCKET) { |
if (so->so_proto && so->so_proto->pr_ctloutput) |
if (so->so_proto && so->so_proto->pr_ctloutput) |
return ((*so->so_proto->pr_ctloutput) |
return ((*so->so_proto->pr_ctloutput) |
Line 958 sosetopt(so, level, optname, m0) |
|
Line 1455 sosetopt(so, level, optname, m0) |
|
case SO_RCVBUF: |
case SO_RCVBUF: |
if (sbreserve(optname == SO_SNDBUF ? |
if (sbreserve(optname == SO_SNDBUF ? |
&so->so_snd : &so->so_rcv, |
&so->so_snd : &so->so_rcv, |
(u_long) optval) == 0) { |
(u_long) optval, so) == 0) { |
error = ENOBUFS; |
error = ENOBUFS; |
goto bad; |
goto bad; |
} |
} |
Line 986 sosetopt(so, level, optname, m0) |
|
Line 1483 sosetopt(so, level, optname, m0) |
|
case SO_RCVTIMEO: |
case SO_RCVTIMEO: |
{ |
{ |
struct timeval *tv; |
struct timeval *tv; |
short val; |
int val; |
|
|
if (m == NULL || m->m_len < sizeof(*tv)) { |
if (m == NULL || m->m_len < sizeof(*tv)) { |
error = EINVAL; |
error = EINVAL; |
goto bad; |
goto bad; |
} |
} |
tv = mtod(m, struct timeval *); |
tv = mtod(m, struct timeval *); |
if (tv->tv_sec * hz + tv->tv_usec / tick > SHRT_MAX) { |
if (tv->tv_sec > (INT_MAX - tv->tv_usec / tick) / hz) { |
error = EDOM; |
error = EDOM; |
goto bad; |
goto bad; |
} |
} |
val = tv->tv_sec * hz + tv->tv_usec / tick; |
val = tv->tv_sec * hz + tv->tv_usec / tick; |
|
if (val == 0 && tv->tv_usec != 0) |
|
val = 1; |
|
|
switch (optname) { |
switch (optname) { |
|
|
Line 1021 sosetopt(so, level, optname, m0) |
|
Line 1520 sosetopt(so, level, optname, m0) |
|
m = NULL; /* freed by protocol */ |
m = NULL; /* freed by protocol */ |
} |
} |
} |
} |
bad: |
bad: |
if (m) |
if (m) |
(void) m_free(m); |
(void) m_free(m); |
return (error); |
return (error); |
} |
} |
|
|
int |
int |
sogetopt(so, level, optname, mp) |
sogetopt(struct socket *so, int level, int optname, struct mbuf **mp) |
register struct socket *so; |
|
int level, optname; |
|
struct mbuf **mp; |
|
{ |
{ |
register struct mbuf *m; |
struct mbuf *m; |
|
|
if (level != SOL_SOCKET) { |
if (level != SOL_SOCKET) { |
if (so->so_proto && so->so_proto->pr_ctloutput) { |
if (so->so_proto && so->so_proto->pr_ctloutput) { |
Line 1104 sogetopt(so, level, optname, mp) |
|
Line 1600 sogetopt(so, level, optname, mp) |
|
break; |
break; |
} |
} |
|
|
|
case SO_OVERFLOWED: |
|
*mtod(m, int *) = so->so_rcv.sb_overflowed; |
|
break; |
|
|
default: |
default: |
(void)m_free(m); |
(void)m_free(m); |
return (ENOPROTOOPT); |
return (ENOPROTOOPT); |
Line 1114 sogetopt(so, level, optname, mp) |
|
Line 1614 sogetopt(so, level, optname, mp) |
|
} |
} |
|
|
void |
void |
sohasoutofband(so) |
sohasoutofband(struct socket *so) |
register struct socket *so; |
|
{ |
{ |
struct proc *p; |
fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so); |
|
|
if (so->so_pgid < 0) |
|
gsignal(-so->so_pgid, SIGURG); |
|
else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0) |
|
psignal(p, SIGURG); |
|
selwakeup(&so->so_rcv.sb_sel); |
selwakeup(&so->so_rcv.sb_sel); |
} |
} |
|
|
|
static void |
|
filt_sordetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) |
|
so->so_rcv.sb_flags &= ~SB_KNOTE; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_soread(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
kn->kn_data = so->so_rcv.sb_cc; |
|
if (so->so_state & SS_CANTRCVMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
return (1); |
|
} |
|
if (so->so_error) /* temporary udp error */ |
|
return (1); |
|
if (kn->kn_sfflags & NOTE_LOWAT) |
|
return (kn->kn_data >= kn->kn_sdata); |
|
return (kn->kn_data >= so->so_rcv.sb_lowat); |
|
} |
|
|
|
static void |
|
filt_sowdetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) |
|
so->so_snd.sb_flags &= ~SB_KNOTE; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_sowrite(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
kn->kn_data = sbspace(&so->so_snd); |
|
if (so->so_state & SS_CANTSENDMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
return (1); |
|
} |
|
if (so->so_error) /* temporary udp error */ |
|
return (1); |
|
if (((so->so_state & SS_ISCONNECTED) == 0) && |
|
(so->so_proto->pr_flags & PR_CONNREQUIRED)) |
|
return (0); |
|
if (kn->kn_sfflags & NOTE_LOWAT) |
|
return (kn->kn_data >= kn->kn_sdata); |
|
return (kn->kn_data >= so->so_snd.sb_lowat); |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_solisten(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
|
|
/* |
|
* Set kn_data to number of incoming connections, not |
|
* counting partial (incomplete) connections. |
|
*/ |
|
kn->kn_data = so->so_qlen; |
|
return (kn->kn_data > 0); |
|
} |
|
|
|
static const struct filterops solisten_filtops = |
|
{ 1, NULL, filt_sordetach, filt_solisten }; |
|
static const struct filterops soread_filtops = |
|
{ 1, NULL, filt_sordetach, filt_soread }; |
|
static const struct filterops sowrite_filtops = |
|
{ 1, NULL, filt_sowdetach, filt_sowrite }; |
|
|
|
int |
|
soo_kqfilter(struct file *fp, struct knote *kn) |
|
{ |
|
struct socket *so; |
|
struct sockbuf *sb; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
switch (kn->kn_filter) { |
|
case EVFILT_READ: |
|
if (so->so_options & SO_ACCEPTCONN) |
|
kn->kn_fop = &solisten_filtops; |
|
else |
|
kn->kn_fop = &soread_filtops; |
|
sb = &so->so_rcv; |
|
break; |
|
case EVFILT_WRITE: |
|
kn->kn_fop = &sowrite_filtops; |
|
sb = &so->so_snd; |
|
break; |
|
default: |
|
return (1); |
|
} |
|
SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext); |
|
sb->sb_flags |= SB_KNOTE; |
|
return (0); |
|
} |
|
|
|
#include <sys/sysctl.h> |
|
|
|
static int sysctl_kern_somaxkva(SYSCTLFN_PROTO); |
|
|
|
/* |
|
* sysctl helper routine for kern.somaxkva. ensures that the given |
|
* value is not too small. |
|
* (XXX should we maybe make sure it's not too large as well?) |
|
*/ |
|
static int |
|
sysctl_kern_somaxkva(SYSCTLFN_ARGS) |
|
{ |
|
int error, new_somaxkva; |
|
struct sysctlnode node; |
|
int s; |
|
|
|
new_somaxkva = somaxkva; |
|
node = *rnode; |
|
node.sysctl_data = &new_somaxkva; |
|
error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
|
if (error || newp == NULL) |
|
return (error); |
|
|
|
if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */ |
|
return (EINVAL); |
|
|
|
s = splvm(); |
|
simple_lock(&so_pendfree_slock); |
|
somaxkva = new_somaxkva; |
|
wakeup(&socurkva); |
|
simple_unlock(&so_pendfree_slock); |
|
splx(s); |
|
|
|
return (error); |
|
} |
|
|
|
SYSCTL_SETUP(sysctl_kern_somaxkva_setup, "sysctl kern.somaxkva setup") |
|
{ |
|
|
|
sysctl_createv(clog, 0, NULL, NULL, |
|
CTLFLAG_PERMANENT, |
|
CTLTYPE_NODE, "kern", NULL, |
|
NULL, 0, NULL, 0, |
|
CTL_KERN, CTL_EOL); |
|
|
|
sysctl_createv(clog, 0, NULL, NULL, |
|
CTLFLAG_PERMANENT|CTLFLAG_READWRITE, |
|
CTLTYPE_INT, "somaxkva", |
|
SYSCTL_DESCR("Maximum amount of kernel memory to be " |
|
"used for socket buffers"), |
|
sysctl_kern_somaxkva, 0, NULL, 0, |
|
CTL_KERN, KERN_SOMAXKVA, CTL_EOL); |
|
} |