Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/uipc_socket.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/uipc_socket.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.200.4.1 retrieving revision 1.255.2.3 diff -u -p -r1.200.4.1 -r1.255.2.3 --- src/sys/kern/uipc_socket.c 2011/03/05 20:55:24 1.200.4.1 +++ src/sys/kern/uipc_socket.c 2018/06/09 15:16:30 1.255.2.3 @@ -1,4 +1,4 @@ -/* $NetBSD: uipc_socket.c,v 1.200.4.1 2011/03/05 20:55:24 rmind Exp $ */ +/* $NetBSD: uipc_socket.c,v 1.255.2.3 2018/06/09 15:16:30 martin Exp $ */ /*- * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc. @@ -62,15 +62,26 @@ * @(#)uipc_socket.c 8.6 (Berkeley) 5/2/95 */ +/* + * Socket operation routines. + * + * These routines are called by the routines in sys_socket.c or from a + * system process, and implement the semantics of socket operations by + * switching out to the protocol specific routines. + */ + #include -__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.200.4.1 2011/03/05 20:55:24 rmind Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.255.2.3 2018/06/09 15:16:30 martin Exp $"); +#ifdef _KERNEL_OPT #include "opt_compat_netbsd.h" #include "opt_sock_counters.h" #include "opt_sosend_loan.h" #include "opt_mbuftrace.h" #include "opt_somaxkva.h" #include "opt_multiprocessor.h" /* XXX */ +#include "opt_sctp.h" +#endif #include #include @@ -92,6 +103,7 @@ __KERNEL_RCSID(0, "$NetBSD: uipc_socket. #include #include #include +#include #ifdef COMPAT_50 #include @@ -102,7 +114,6 @@ __KERNEL_RCSID(0, "$NetBSD: uipc_socket. #include #include -MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options"); MALLOC_DEFINE(M_SONAME, "soname", "socket name"); extern const struct fileops socketops; @@ -135,8 +146,6 @@ EVCNT_ATTACH_STATIC(sosend_kvalimit); #endif /* SOSEND_COUNTERS */ -static struct callback_entry sokva_reclaimerentry; - #if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR) int sock_loan_thresh = -1; #else @@ -144,7 +153,7 @@ int sock_loan_thresh = 4096; #endif static kmutex_t so_pendfree_lock; -static struct mbuf *so_pendfree; +static struct mbuf *so_pendfree = NULL; #ifndef SOMAXKVA #define SOMAXKVA (16 * 1024 * 1024) @@ -157,10 +166,11 @@ static kauth_listener_t socket_listener; #define SOCK_LOAN_CHUNK 65536 -static size_t sodopendfree(void); -static size_t sodopendfreel(void); +static void sopendfree_thread(void *); +static kcondvar_t pendfree_thread_cv; +static lwp_t *sopendfree_lwp; -static void sysctl_kern_somaxkva_setup(void); +static void sysctl_kern_socket_setup(void); static struct sysctllog *socket_sysctllog; static vsize_t @@ -170,21 +180,6 @@ sokvareserve(struct socket *so, vsize_t mutex_enter(&so_pendfree_lock); while (socurkva + len > somaxkva) { - size_t freed; - - /* - * try to do pendfree. - */ - - freed = sodopendfreel(); - - /* - * if some kva was freed, try again. - */ - - if (freed) - continue; - SOSEND_COUNTER_INCR(&sosend_kvalimit); error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock); if (error) { @@ -212,7 +207,7 @@ sokvaunreserve(vsize_t len) */ vaddr_t -sokvaalloc(vsize_t len, struct socket *so) +sokvaalloc(vaddr_t sva, vsize_t len, struct socket *so) { vaddr_t lva; @@ -227,7 +222,8 @@ sokvaalloc(vsize_t len, struct socket *s * allocate kva. 
*/ - lva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA); + lva = uvm_km_alloc(kernel_map, len, atop(sva) & uvmexp.colormask, + UVM_KMF_COLORMATCH | UVM_KMF_VAONLY | UVM_KMF_WAITVA); if (lva == 0) { sokvaunreserve(len); return (0); @@ -277,56 +273,46 @@ sodoloanfree(struct vm_page **pgs, void sokvafree(sva, len); } -static size_t -sodopendfree(void) -{ - size_t rv; - - if (__predict_true(so_pendfree == NULL)) - return 0; - - mutex_enter(&so_pendfree_lock); - rv = sodopendfreel(); - mutex_exit(&so_pendfree_lock); - - return rv; -} - /* - * sodopendfreel: free mbufs on "pendfree" list. + * sopendfree_thread: free mbufs on "pendfree" list. * unlock and relock so_pendfree_lock when freeing mbufs. - * - * => called with so_pendfree_lock held. */ -static size_t -sodopendfreel(void) +static void +sopendfree_thread(void *v) { struct mbuf *m, *next; - size_t rv = 0; - - KASSERT(mutex_owned(&so_pendfree_lock)); + size_t rv; - while (so_pendfree != NULL) { - m = so_pendfree; - so_pendfree = NULL; - mutex_exit(&so_pendfree_lock); + mutex_enter(&so_pendfree_lock); - for (; m != NULL; m = next) { - next = m->m_next; - KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0); - KASSERT(m->m_ext.ext_refcnt == 0); + for (;;) { + rv = 0; + while (so_pendfree != NULL) { + m = so_pendfree; + so_pendfree = NULL; + mutex_exit(&so_pendfree_lock); + + for (; m != NULL; m = next) { + next = m->m_next; + KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) == + 0); + KASSERT(m->m_ext.ext_refcnt == 0); + + rv += m->m_ext.ext_size; + sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf, + m->m_ext.ext_size); + pool_cache_put(mb_cache, m); + } - rv += m->m_ext.ext_size; - sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf, - m->m_ext.ext_size); - pool_cache_put(mb_cache, m); + mutex_enter(&so_pendfree_lock); } - - mutex_enter(&so_pendfree_lock); + if (rv) + cv_broadcast(&socurkva_cv); + cv_wait(&pendfree_thread_cv, &so_pendfree_lock); } - - return (rv); + panic("sopendfree_thread"); + /* NOTREACHED */ } void @@ -345,7 +331,7 @@ soloanfree(struct mbuf *m, void *buf, si mutex_enter(&so_pendfree_lock); m->m_next = so_pendfree; so_pendfree = m; - cv_broadcast(&socurkva_cv); + cv_signal(&pendfree_thread_cv); mutex_exit(&so_pendfree_lock); } @@ -375,7 +361,7 @@ sosend_loan(struct socket *so, struct ui KASSERT(npgs <= M_EXT_MAXPAGES); - lva = sokvaalloc(len, so); + lva = sokvaalloc(sva, len, so); if (lva == 0) return 0; @@ -408,20 +394,6 @@ sosend_loan(struct socket *so, struct ui return (space); } -static int -sokva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg) -{ - - KASSERT(ce == &sokva_reclaimerentry); - KASSERT(obj == NULL); - - sodopendfree(); - if (!vm_map_starved_p(kernel_map)) { - return CALLBACK_CHAIN_ABORT; - } - return CALLBACK_CHAIN_CONTINUE; -} - struct mbuf * getsombuf(struct socket *so, int type) { @@ -455,7 +427,7 @@ socket_listener_cb(kauth_cred_t cred, ka /* Normal users can only drop their own connections. */ struct socket *so = (struct socket *)arg1; - if (proc_uidmatch(cred, so->so_cred)) + if (so->so_cred && proc_uidmatch(cred, so->so_cred) == 0) result = KAUTH_RESULT_ALLOW; break; @@ -463,17 +435,20 @@ socket_listener_cb(kauth_cred_t cred, ka case KAUTH_REQ_NETWORK_SOCKET_OPEN: /* We allow "raw" routing/bluetooth sockets to anyone. 
*/ - if ((u_long)arg1 == PF_ROUTE || (u_long)arg1 == PF_OROUTE - || (u_long)arg1 == PF_BLUETOOTH) { + switch ((u_long)arg1) { + case PF_ROUTE: + case PF_OROUTE: + case PF_BLUETOOTH: + case PF_CAN: result = KAUTH_RESULT_ALLOW; - } else { + break; + default: /* Privileged, let secmodel handle this. */ if ((u_long)arg2 == SOCK_RAW) break; + result = KAUTH_RESULT_ALLOW; + break; } - - result = KAUTH_RESULT_ALLOW; - break; case KAUTH_REQ_NETWORK_SOCKET_CANSEE: @@ -492,32 +467,37 @@ void soinit(void) { - sysctl_kern_somaxkva_setup(); + sysctl_kern_socket_setup(); mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM); softnet_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); cv_init(&socurkva_cv, "sokva"); + cv_init(&pendfree_thread_cv, "sopendfr"); soinit2(); /* Set the initial adjusted socket buffer size. */ if (sb_max_set(sb_max)) panic("bad initial sb_max value: %lu", sb_max); - callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback, - &sokva_reclaimerentry, NULL, sokva_reclaim_callback); - socket_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK, socket_listener_cb, NULL); } +void +soinit1(void) +{ + int error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, + sopendfree_thread, NULL, &sopendfree_lwp, "sopendfree"); + if (error) + panic("soinit1 %d", error); +} + /* - * Socket operation routines. - * These routines are called by the routines in - * sys_socket.c or from a system process, and - * implement the semantics of socket operations by - * switching out to the protocol specific routines. + * socreate: create a new socket of the specified type and the protocol. + * + * => Caller may specify another socket for lock sharing (must not be held). + * => Returns the new socket without lock held. */ -/*ARGSUSED*/ int socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l, struct socket *lockso) @@ -547,7 +527,7 @@ socreate(int dom, struct socket **aso, i return EPROTOTYPE; return EPROTONOSUPPORT; } - if (prp->pr_usrreq == NULL) + if (prp->pr_usrreqs == NULL) return EPROTONOSUPPORT; if (prp->pr_type != type) return EPROTOTYPE; @@ -565,55 +545,75 @@ socreate(int dom, struct socket **aso, i uid = kauth_cred_geteuid(l->l_cred); so->so_uidinfo = uid_find(uid); so->so_cpid = l->l_proc->p_pid; - if (lockso != NULL) { - /* Caller wants us to share a lock. */ + + /* + * Lock assigned and taken during PCB attach, unless we share + * the lock with another socket, e.g. socketpair(2) case. + */ + if (lockso) { lock = lockso->so_lock; so->so_lock = lock; mutex_obj_hold(lock); mutex_enter(lock); - } else { - /* Lock assigned and taken during PRU_ATTACH. */ } - error = (*prp->pr_usrreq)(so, PRU_ATTACH, NULL, - (struct mbuf *)(long)proto, NULL, l); + + /* Attach the PCB (returns with the socket lock held). */ + error = (*prp->pr_usrreqs->pr_attach)(so, proto); KASSERT(solocked(so)); - if (error != 0) { + + if (error) { + KASSERT(so->so_pcb == NULL); so->so_state |= SS_NOFDREF; sofree(so); return error; } so->so_cred = kauth_cred_dup(l->l_cred); sounlock(so); + *aso = so; return 0; } -/* On success, write file descriptor to fdout and return zero. On - * failure, return non-zero; *fdout will be undefined. +/* + * fsocreate: create a socket and a file descriptor associated with it. + * + * => On success, write file descriptor to fdout and return zero. + * => On failure, return non-zero; *fdout will be undefined. 
*/ int -fsocreate(int domain, struct socket **sop, int type, int protocol, - struct lwp *l, int *fdout) +fsocreate(int domain, struct socket **sop, int type, int proto, int *fdout) { - struct socket *so; - struct file *fp; - int fd, error; + lwp_t *l = curlwp; + int error, fd, flags; + struct socket *so; + struct file *fp; - if ((error = fd_allocfile(&fp, &fd)) != 0) - return (error); - fp->f_flag = FREAD|FWRITE; + if ((error = fd_allocfile(&fp, &fd)) != 0) { + return error; + } + flags = type & SOCK_FLAGS_MASK; + fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0); + fp->f_flag = FREAD|FWRITE|((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)| + ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0); fp->f_type = DTYPE_SOCKET; fp->f_ops = &socketops; - error = socreate(domain, &so, type, protocol, l, NULL); - if (error != 0) { + + type &= ~SOCK_FLAGS_MASK; + error = socreate(domain, &so, type, proto, l, NULL); + if (error) { fd_abort(curproc, fp, fd); - } else { - if (sop != NULL) - *sop = so; - fp->f_data = so; - fd_affix(curproc, fp, fd); - *fdout = fd; + return error; } + if (flags & SOCK_NONBLOCK) { + so->so_state |= SS_NBIO; + } + fp->f_socket = so; + fd_affix(curproc, fp, fd); + + if (sop != NULL) { + *sop = so; + } + *fdout = fd; return error; } @@ -631,12 +631,16 @@ sofamily(const struct socket *so) } int -sobind(struct socket *so, struct mbuf *nam, struct lwp *l) +sobind(struct socket *so, struct sockaddr *nam, struct lwp *l) { int error; solock(so); - error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, l); + if (nam->sa_family != so->so_proto->pr_domain->dom_family) { + sounlock(so); + return EAFNOSUPPORT; + } + error = (*so->so_proto->pr_usrreqs->pr_bind)(so, nam, l); sounlock(so); return error; } @@ -645,24 +649,29 @@ int solisten(struct socket *so, int backlog, struct lwp *l) { int error; + short oldopt, oldqlimit; solock(so); - if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | + if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) != 0) { - sounlock(so); - return (EOPNOTSUPP); - } - error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, - NULL, NULL, l); - if (error != 0) { sounlock(so); - return error; + return EINVAL; } + oldopt = so->so_options; + oldqlimit = so->so_qlimit; if (TAILQ_EMPTY(&so->so_q)) so->so_options |= SO_ACCEPTCONN; if (backlog < 0) backlog = 0; so->so_qlimit = min(backlog, somaxconn); + + error = (*so->so_proto->pr_usrreqs->pr_listen)(so, l); + if (error != 0) { + so->so_options = oldopt; + so->so_qlimit = oldqlimit; + sounlock(so); + return error; + } sounlock(so); return 0; } @@ -710,18 +719,15 @@ sofree(struct socket *so) } /* - * Close a socket on last file table reference removal. - * Initiate disconnect if connected. - * Free socket when disconnect complete. + * soclose: close a socket on last file table reference removal. + * Initiate disconnect if connected. Free socket when disconnect complete. 
*/ int soclose(struct socket *so) { - struct socket *so2; - int error; - int error2; + struct socket *so2; + int error = 0; - error = 0; solock(so); if (so->so_options & SO_ACCEPTCONN) { for (;;) { @@ -744,7 +750,7 @@ soclose(struct socket *so) break; } } - if (so->so_pcb == 0) + if (so->so_pcb == NULL) goto discard; if (so->so_state & SS_ISCONNECTED) { if ((so->so_state & SS_ISDISCONNECTING) == 0) { @@ -753,7 +759,8 @@ soclose(struct socket *so) goto drop; } if (so->so_options & SO_LINGER) { - if ((so->so_state & SS_ISDISCONNECTING) && so->so_nbio) + if ((so->so_state & (SS_ISDISCONNECTING|SS_NBIO)) == + (SS_ISDISCONNECTING|SS_NBIO)) goto drop; while (so->so_state & SS_ISCONNECTED) { error = sowait(so, true, so->so_linger * hz); @@ -764,18 +771,15 @@ soclose(struct socket *so) } drop: if (so->so_pcb) { - error2 = (*so->so_proto->pr_usrreq)(so, PRU_DETACH, - NULL, NULL, NULL, NULL); - if (error == 0) - error = error2; + KASSERT(solocked(so)); + (*so->so_proto->pr_usrreqs->pr_detach)(so); } discard: - if (so->so_state & SS_NOFDREF) - panic("soclose: NOFDREF"); + KASSERT((so->so_state & SS_NOFDREF) == 0); kauth_cred_free(so->so_cred); so->so_state |= SS_NOFDREF; sofree(so); - return (error); + return error; } /* @@ -786,13 +790,12 @@ soabort(struct socket *so) { u_int refs; int error; - + KASSERT(solocked(so)); KASSERT(so->so_head == NULL); so->so_aborting++; /* XXX */ - error = (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, - NULL, NULL, NULL); + error = (*so->so_proto->pr_usrreqs->pr_abort)(so); refs = --so->so_aborting; /* XXX */ if (error || (refs == 0)) { sofree(so); @@ -803,35 +806,32 @@ soabort(struct socket *so) } int -soaccept(struct socket *so, struct mbuf *nam) +soaccept(struct socket *so, struct sockaddr *nam) { - int error; + int error; KASSERT(solocked(so)); + KASSERT((so->so_state & SS_NOFDREF) != 0); - error = 0; - if ((so->so_state & SS_NOFDREF) == 0) - panic("soaccept: !NOFDREF"); so->so_state &= ~SS_NOFDREF; if ((so->so_state & SS_ISDISCONNECTED) == 0 || (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) - error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, - NULL, nam, NULL, NULL); + error = (*so->so_proto->pr_usrreqs->pr_accept)(so, nam); else error = ECONNABORTED; - return (error); + return error; } int -soconnect(struct socket *so, struct mbuf *nam, struct lwp *l) +soconnect(struct socket *so, struct sockaddr *nam, struct lwp *l) { - int error; + int error; KASSERT(solocked(so)); if (so->so_options & SO_ACCEPTCONN) - return (EOPNOTSUPP); + return EOPNOTSUPP; /* * If protocol is connection-based, can only connect once. * Otherwise, if connected, try to disconnect first. 
@@ -840,24 +840,24 @@ soconnect(struct socket *so, struct mbuf */ if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && ((so->so_proto->pr_flags & PR_CONNREQUIRED) || - (error = sodisconnect(so)))) + (error = sodisconnect(so)))) { error = EISCONN; - else - error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT, - NULL, nam, NULL, l); - return (error); + } else { + if (nam->sa_family != so->so_proto->pr_domain->dom_family) { + return EAFNOSUPPORT; + } + error = (*so->so_proto->pr_usrreqs->pr_connect)(so, nam, l); + } + + return error; } int soconnect2(struct socket *so1, struct socket *so2) { - int error; - KASSERT(solocked2(so1, so2)); - error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, - NULL, (struct mbuf *)so2, NULL, NULL); - return (error); + return (*so1->so_proto->pr_usrreqs->pr_connect2)(so1, so2); } int @@ -872,10 +872,8 @@ sodisconnect(struct socket *so) } else if (so->so_state & SS_ISDISCONNECTING) { error = EALREADY; } else { - error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, - NULL, NULL, NULL, NULL); + error = (*so->so_proto->pr_usrreqs->pr_disconnect)(so); } - sodopendfree(); return (error); } @@ -898,17 +896,14 @@ sodisconnect(struct socket *so) * Data and control buffers are freed on return. */ int -sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, - struct mbuf *control, int flags, struct lwp *l) +sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, + struct mbuf *top, struct mbuf *control, int flags, struct lwp *l) { struct mbuf **mp, *m; - struct proc *p; long space, len, resid, clen, mlen; int error, s, dontroute, atomic; short wakeup_state = 0; - p = l->l_proc; - sodopendfree(); clen = 0; /* @@ -955,12 +950,11 @@ sosend(struct socket *so, struct mbuf *a } if ((so->so_state & SS_ISCONNECTED) == 0) { if (so->so_proto->pr_flags & PR_CONNREQUIRED) { - if ((so->so_state & SS_ISCONFIRMING) == 0 && - !(resid == 0 && clen != 0)) { + if (resid || clen == 0) { error = ENOTCONN; goto release; } - } else if (addr == 0) { + } else if (addr == NULL) { error = EDESTADDRREQ; goto release; } @@ -975,7 +969,7 @@ sosend(struct socket *so, struct mbuf *a } if (space < resid + clen && (atomic || space < so->so_snd.sb_lowat || space < clen)) { - if (so->so_nbio) { + if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) { error = EWOULDBLOCK; goto release; } @@ -1008,7 +1002,7 @@ sosend(struct socket *so, struct mbuf *a m = m_gethdr(M_WAIT, MT_DATA); mlen = MHLEN; m->m_pkthdr.len = 0; - m->m_pkthdr.rcvif = NULL; + m_reset_rcvif(m); } else { m = m_get(M_WAIT, MT_DATA); mlen = MLEN; @@ -1074,9 +1068,13 @@ sosend(struct socket *so, struct mbuf *a so->so_options |= SO_DONTROUTE; if (resid > 0) so->so_state |= SS_MORETOCOME; - error = (*so->so_proto->pr_usrreq)(so, - (flags & MSG_OOB) ? 
PRU_SENDOOB : PRU_SEND, - top, addr, control, curlwp); + if (flags & MSG_OOB) { + error = (*so->so_proto->pr_usrreqs->pr_sendoob)( + so, top, control); + } else { + error = (*so->so_proto->pr_usrreqs->pr_send)(so, + top, addr, control, l); + } if (dontroute) so->so_options &= ~SO_DONTROUTE; if (resid > 0) @@ -1161,7 +1159,8 @@ soreceive(struct socket *so, struct mbuf { struct lwp *l = curlwp; struct mbuf *m, **mp, *mt; - int atomic, flags, len, error, s, offset, moff, type, orig_resid; + size_t len, offset, moff, orig_resid; + int atomic, flags, error, s, type; const struct protosw *pr; struct mbuf *nextrecord; int mbuf_removed = 0; @@ -1184,20 +1183,16 @@ soreceive(struct socket *so, struct mbuf else flags = 0; - if ((flags & MSG_DONTWAIT) == 0) - sodopendfree(); - if (flags & MSG_OOB) { m = m_get(M_WAIT, MT_DATA); solock(so); - error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, - (struct mbuf *)(long)(flags & MSG_PEEK), NULL, l); + error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK); sounlock(so); if (error) goto bad; do { error = uiomove(mtod(m, void *), - (int) min(uio->uio_resid, m->m_len), uio); + MIN(uio->uio_resid, m->m_len), uio); m = m_free(m); } while (uio->uio_resid > 0 && error == 0 && m); bad: @@ -1215,9 +1210,6 @@ soreceive(struct socket *so, struct mbuf */ s = splsoftnet(); solock(so); - if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) - (*pr->pr_usrreq)(so, PRU_RCVD, NULL, NULL, NULL, l); - restart: if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) { sounlock(so); @@ -1248,12 +1240,16 @@ soreceive(struct socket *so, struct mbuf if (m == NULL && so->so_rcv.sb_cc) panic("receive 1"); #endif - if (so->so_error) { + if (so->so_error || so->so_rerror) { if (m != NULL) goto dontblock; - error = so->so_error; - if ((flags & MSG_PEEK) == 0) + if (so->so_error) { + error = so->so_error; so->so_error = 0; + } else { + error = so->so_rerror; + so->so_rerror = 0; + } goto release; } if (so->so_state & SS_CANTRCVMORE) { @@ -1274,7 +1270,8 @@ soreceive(struct socket *so, struct mbuf } if (uio->uio_resid == 0) goto release; - if (so->so_nbio || (flags & MSG_DONTWAIT)) { + if ((so->so_state & SS_NBIO) || + (flags & (MSG_DONTWAIT|MSG_NBIO))) { error = EWOULDBLOCK; goto release; } @@ -1335,12 +1332,35 @@ soreceive(struct socket *so, struct mbuf m->m_next = NULL; m = so->so_rcv.sb_mb; } else { - MFREE(m, so->so_rcv.sb_mb); - m = so->so_rcv.sb_mb; + m = so->so_rcv.sb_mb = m_free(m); } sbsync(&so->so_rcv, nextrecord); } } + if (pr->pr_flags & PR_ADDR_OPT) { + /* + * For SCTP we may be getting a + * whole message OR a partial delivery. + */ + if (m->m_type == MT_SONAME) { + orig_resid = 0; + if (flags & MSG_PEEK) { + if (paddr) + *paddr = m_copy(m, 0, m->m_len); + m = m->m_next; + } else { + sbfree(&so->so_rcv, m); + if (paddr) { + *paddr = m; + so->so_rcv.sb_mb = m->m_next; + m->m_next = 0; + m = so->so_rcv.sb_mb; + } else { + m = so->so_rcv.sb_mb = m_free(m); + } + } + } + } /* * Process one or more MT_CONTROL mbufs present before any data mbufs @@ -1379,7 +1399,9 @@ soreceive(struct socket *so, struct mbuf type == SCM_RIGHTS) { sounlock(so); splx(s); - error = (*dom->dom_externalize)(cm, l); + error = (*dom->dom_externalize)(cm, l, + (flags & MSG_CMSG_CLOEXEC) ? 
+ O_CLOEXEC : 0); s = splsoftnet(); solock(so); } @@ -1393,7 +1415,7 @@ soreceive(struct socket *so, struct mbuf */ if (dom->dom_dispose != NULL && type == SCM_RIGHTS) { - sounlock(so); + sounlock(so); (*dom->dom_dispose)(cm); solock(so); } @@ -1448,7 +1470,7 @@ soreceive(struct socket *so, struct mbuf SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); sounlock(so); splx(s); - error = uiomove(mtod(m, char *) + moff, (int)len, uio); + error = uiomove(mtod(m, char *) + moff, len, uio); s = splsoftnet(); solock(so); if (error != 0) { @@ -1473,6 +1495,10 @@ soreceive(struct socket *so, struct mbuf if (len == m->m_len - moff) { if (m->m_flags & M_EOR) flags |= MSG_EOR; +#ifdef SCTP + if (m->m_flags & M_NOTIFICATION) + flags |= MSG_NOTIFICATION; +#endif /* SCTP */ if (flags & MSG_PEEK) { m = m->m_next; moff = 0; @@ -1485,8 +1511,7 @@ soreceive(struct socket *so, struct mbuf so->so_rcv.sb_mb = m = m->m_next; *mp = NULL; } else { - MFREE(m, so->so_rcv.sb_mb); - m = so->so_rcv.sb_mb; + m = so->so_rcv.sb_mb = m_free(m); } /* * If m != NULL, we also know that @@ -1544,7 +1569,8 @@ soreceive(struct socket *so, struct mbuf */ while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && !sosendallatonce(so) && !nextrecord) { - if (so->so_error || so->so_state & SS_CANTRCVMORE) + if (so->so_error || so->so_rerror || + so->so_state & SS_CANTRCVMORE) break; /* * If we are peeking and the socket receive buffer is @@ -1558,8 +1584,7 @@ soreceive(struct socket *so, struct mbuf * get it filled again. */ if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) - (*pr->pr_usrreq)(so, PRU_RCVD, - NULL, (struct mbuf *)(long)flags, NULL, l); + (*pr->pr_usrreqs->pr_rcvd)(so, flags, l); SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); if (wakeup_state & SS_RESTARTSYS) @@ -1600,8 +1625,7 @@ soreceive(struct socket *so, struct mbuf SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) - (*pr->pr_usrreq)(so, PRU_RCVD, NULL, - (struct mbuf *)(long)flags, NULL, l); + (*pr->pr_usrreqs->pr_rcvd)(so, flags, l); } if (orig_resid == uio->uio_resid && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { @@ -1635,8 +1659,7 @@ soshutdown(struct socket *so, int how) error = 0; } if (how == SHUT_WR || how == SHUT_RDWR) - error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, - NULL, NULL, NULL); + error = (*pr->pr_usrreqs->pr_shutdown)(so); return error; } @@ -1695,7 +1718,8 @@ sorflush(struct socket *so) static int sosetopt1(struct socket *so, const struct sockopt *sopt) { - int error = EINVAL, optval, opt; + int error = EINVAL, opt; + int optval = 0; /* XXX: gcc */ struct linger l; struct timeval tv; @@ -1706,22 +1730,22 @@ sosetopt1(struct socket *so, const struc KASSERT(solocked(so)); break; - case SO_LINGER: - error = sockopt_get(sopt, &l, sizeof(l)); + case SO_LINGER: + error = sockopt_get(sopt, &l, sizeof(l)); solock(so); - if (error) - break; - if (l.l_linger < 0 || l.l_linger > USHRT_MAX || - l.l_linger > (INT_MAX / hz)) { + if (error) + break; + if (l.l_linger < 0 || l.l_linger > USHRT_MAX || + l.l_linger > (INT_MAX / hz)) { error = EDOM; break; } - so->so_linger = l.l_linger; - if (l.l_onoff) - so->so_options |= SO_LINGER; - else - so->so_options &= ~SO_LINGER; - break; + so->so_linger = l.l_linger; + if (l.l_onoff) + so->so_options |= SO_LINGER; + else + so->so_options &= ~SO_LINGER; + break; case SO_DEBUG: case SO_KEEPALIVE: @@ -1732,6 +1756,7 @@ sosetopt1(struct socket *so, 
const struc case SO_REUSEPORT: case SO_OOBINLINE: case SO_TIMESTAMP: + case SO_NOSIGPIPE: #ifdef SO_OTIMESTAMP case SO_OTIMESTAMP: #endif @@ -1899,7 +1924,7 @@ so_setsockopt(struct lwp *l, struct sock return error; } - + /* * internal get SOL_SOCKET options */ @@ -1932,9 +1957,11 @@ sogetopt1(struct socket *so, struct sock case SO_BROADCAST: case SO_OOBINLINE: case SO_TIMESTAMP: + case SO_NOSIGPIPE: #ifdef SO_OTIMESTAMP case SO_OTIMESTAMP: #endif + case SO_ACCEPTCONN: error = sockopt_setint(sopt, (so->so_options & opt) ? 1 : 0); break; @@ -2074,7 +2101,7 @@ sockopt_destroy(struct sockopt *sopt) /* * set sockopt value * - value is copied into sockopt - * - memory is allocated when necessary, will not sleep + * - memory is allocated when necessary, will not sleep */ int sockopt_set(struct sockopt *sopt, const void *buf, size_t len) @@ -2087,8 +2114,12 @@ sockopt_set(struct sockopt *sopt, const return error; } - KASSERT(sopt->sopt_size == len); + if (sopt->sopt_size < len) + return EINVAL; + memcpy(sopt->sopt_data, buf, len); + sopt->sopt_retsize = len; + return 0; } @@ -2147,9 +2178,12 @@ sockopt_setmbuf(struct sockopt *sopt, st return error; } - KASSERT(sopt->sopt_size == len); + if (sopt->sopt_size < len) + return EINVAL; + m_copydata(m, 0, len, sopt->sopt_data); m_freem(m); + sopt->sopt_retsize = len; return 0; } @@ -2199,7 +2233,7 @@ filt_sordetach(struct knote *kn) { struct socket *so; - so = ((file_t *)kn->kn_obj)->f_data; + so = ((file_t *)kn->kn_obj)->f_socket; solock(so); SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) @@ -2214,7 +2248,7 @@ filt_soread(struct knote *kn, long hint) struct socket *so; int rv; - so = ((file_t *)kn->kn_obj)->f_data; + so = ((file_t *)kn->kn_obj)->f_socket; if (hint != NOTE_SUBMIT) solock(so); kn->kn_data = so->so_rcv.sb_cc; @@ -2222,11 +2256,11 @@ filt_soread(struct knote *kn, long hint) kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; rv = 1; - } else if (so->so_error) /* temporary udp error */ + } else if (so->so_error || so->so_rerror) rv = 1; else if (kn->kn_sfflags & NOTE_LOWAT) rv = (kn->kn_data >= kn->kn_sdata); - else + else rv = (kn->kn_data >= so->so_rcv.sb_lowat); if (hint != NOTE_SUBMIT) sounlock(so); @@ -2238,7 +2272,7 @@ filt_sowdetach(struct knote *kn) { struct socket *so; - so = ((file_t *)kn->kn_obj)->f_data; + so = ((file_t *)kn->kn_obj)->f_socket; solock(so); SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) @@ -2253,7 +2287,7 @@ filt_sowrite(struct knote *kn, long hint struct socket *so; int rv; - so = ((file_t *)kn->kn_obj)->f_data; + so = ((file_t *)kn->kn_obj)->f_socket; if (hint != NOTE_SUBMIT) solock(so); kn->kn_data = sbspace(&so->so_snd); @@ -2261,7 +2295,7 @@ filt_sowrite(struct knote *kn, long hint kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; rv = 1; - } else if (so->so_error) /* temporary udp error */ + } else if (so->so_error) rv = 1; else if (((so->so_state & SS_ISCONNECTED) == 0) && (so->so_proto->pr_flags & PR_CONNREQUIRED)) @@ -2282,7 +2316,7 @@ filt_solisten(struct knote *kn, long hin struct socket *so; int rv; - so = ((file_t *)kn->kn_obj)->f_data; + so = ((file_t *)kn->kn_obj)->f_socket; /* * Set kn_data to number of incoming connections, not @@ -2310,7 +2344,7 @@ soo_kqfilter(struct file *fp, struct kno struct socket *so; struct sockbuf *sb; - so = ((file_t *)kn->kn_obj)->f_data; + so = ((file_t *)kn->kn_obj)->f_socket; solock(so); switch (kn->kn_filter) { case EVFILT_READ: 
@@ -2392,6 +2426,7 @@ sopoll(struct socket *so, int events) #include static int sysctl_kern_somaxkva(SYSCTLFN_PROTO); +static int sysctl_kern_sbmax(SYSCTLFN_PROTO); /* * sysctl helper routine for kern.somaxkva. ensures that the given @@ -2422,16 +2457,35 @@ sysctl_kern_somaxkva(SYSCTLFN_ARGS) return (error); } +/* + * sysctl helper routine for kern.sbmax. Basically just ensures that + * any new value is not too small. + */ +static int +sysctl_kern_sbmax(SYSCTLFN_ARGS) +{ + int error, new_sbmax; + struct sysctlnode node; + + new_sbmax = sb_max; + node = *rnode; + node.sysctl_data = &new_sbmax; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); + if (error || newp == NULL) + return (error); + + KERNEL_LOCK(1, NULL); + error = sb_max_set(new_sbmax); + KERNEL_UNLOCK_ONE(NULL); + + return (error); +} + static void -sysctl_kern_somaxkva_setup(void) +sysctl_kern_socket_setup(void) { KASSERT(socket_sysctllog == NULL); - sysctl_createv(&socket_sysctllog, 0, NULL, NULL, - CTLFLAG_PERMANENT, - CTLTYPE_NODE, "kern", NULL, - NULL, 0, NULL, 0, - CTL_KERN, CTL_EOL); sysctl_createv(&socket_sysctllog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, @@ -2440,4 +2494,11 @@ sysctl_kern_somaxkva_setup(void) "used for socket buffers"), sysctl_kern_somaxkva, 0, NULL, 0, CTL_KERN, KERN_SOMAXKVA, CTL_EOL); + + sysctl_createv(&socket_sysctllog, 0, NULL, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, + CTLTYPE_INT, "sbmax", + SYSCTL_DESCR("Maximum socket buffer size"), + sysctl_kern_sbmax, 0, NULL, 0, + CTL_KERN, KERN_SBMAX, CTL_EOL); }
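
The largest structural change running through this diff is the retirement of the single variadic (*pr->pr_usrreq)(so, PRU_XXX, ...) protocol entry point in favour of a pr_usrreqs table of typed per-operation handlers (pr_attach, pr_bind, pr_connect, pr_send, and so on), which callers such as sobind() and soconnect() now invoke directly. The sketch below is illustrative only and is not NetBSD source: the toy_* and demo_* names are invented for the example, and the real struct pr_usrreqs carries many more operations than the two shown here. It merely contrasts the two dispatch styles so the mechanical substitutions in the hunks above are easier to follow.

/*
 * Illustrative sketch only -- not NetBSD code.  Contrasts the old
 * request-code dispatcher with a table of per-operation handlers.
 * All identifiers (toy_*, demo_*) are hypothetical.
 */
#include <stdio.h>

struct toy_socket   { int bound_port; };
struct toy_sockaddr { int port; };

/* Old style: one entry point, a PRU_*-like request code selects the op. */
enum toy_req { TOY_PRU_ATTACH, TOY_PRU_BIND };

static int
toy_old_usrreq(struct toy_socket *so, enum toy_req req, void *arg)
{
	switch (req) {
	case TOY_PRU_ATTACH:
		so->bound_port = 0;
		return 0;
	case TOY_PRU_BIND:
		so->bound_port = ((struct toy_sockaddr *)arg)->port;
		return 0;
	default:
		return -1;	/* EOPNOTSUPP in spirit */
	}
}

/* New style: a table of typed per-operation function pointers. */
struct toy_usrreqs {
	int (*pr_attach)(struct toy_socket *, int);
	int (*pr_bind)(struct toy_socket *, struct toy_sockaddr *);
};

static int
demo_attach(struct toy_socket *so, int proto)
{
	(void)proto;
	so->bound_port = 0;
	return 0;
}

static int
demo_bind(struct toy_socket *so, struct toy_sockaddr *nam)
{
	so->bound_port = nam->port;
	return 0;
}

static const struct toy_usrreqs demo_usrreqs = {
	.pr_attach = demo_attach,
	.pr_bind   = demo_bind,
};

int
main(void)
{
	struct toy_socket so;
	struct toy_sockaddr sa = { .port = 80 };

	/* Old-style dispatch through a request code. */
	toy_old_usrreq(&so, TOY_PRU_ATTACH, NULL);
	toy_old_usrreq(&so, TOY_PRU_BIND, &sa);
	printf("old: bound to %d\n", so.bound_port);

	/* New-style dispatch, analogous to (*pr_usrreqs->pr_bind)(so, nam, l). */
	(*demo_usrreqs.pr_attach)(&so, 0);
	(*demo_usrreqs.pr_bind)(&so, &sa);
	printf("new: bound to %d\n", so.bound_port);
	return 0;
}

The per-operation table trades the flexibility of one catch-all signature for type checking of each argument list, which is why the hunks above can also drop the casts of addresses and flags through struct mbuf pointers.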