version 1.66.4.2, 2002/11/08 09:31:35 |
version 1.79, 2003/04/09 18:38:03 |
Line 91 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 91 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <sys/signalvar.h> |
#include <sys/signalvar.h> |
#include <sys/resourcevar.h> |
#include <sys/resourcevar.h> |
#include <sys/pool.h> |
#include <sys/pool.h> |
|
#include <sys/event.h> |
|
|
#include <uvm/uvm.h> |
#include <uvm/uvm.h> |
|
|
struct pool socket_pool; |
struct pool socket_pool; |
|
|
|
MALLOC_DEFINE(M_SOOPTS, "soopts", "socket options"); |
|
MALLOC_DEFINE(M_SONAME, "soname", "socket name"); |
|
|
extern int somaxconn; /* patchable (XXX sysctl) */ |
extern int somaxconn; /* patchable (XXX sysctl) */ |
int somaxconn = SOMAXCONN; |
int somaxconn = SOMAXCONN; |
|
|
|
|
#endif /* SOSEND_COUNTERS */ |
#endif /* SOSEND_COUNTERS */ |
} |
} |
|
|
#ifdef SOSEND_LOAN |
#ifdef SOSEND_NO_LOAN |
int use_sosend_loan = 1; |
|
#else |
|
int use_sosend_loan = 0; |
int use_sosend_loan = 0; |
|
#else |
|
int use_sosend_loan = 1; |
#endif |
#endif |
|
|
struct mbuf *so_pendfree; |
struct mbuf *so_pendfree; |
Line 150 int sokvawaiters; |
|
Line 154 int sokvawaiters; |
|
#define SOCK_LOAN_CHUNK 65536 |
#define SOCK_LOAN_CHUNK 65536 |
|
|
static void |
static void |
sodoloanfree(caddr_t buf, u_int size) |
sodoloanfree(struct vm_page **pgs, caddr_t buf, size_t size) |
{ |
{ |
struct vm_page **pgs; |
|
vaddr_t va, sva, eva; |
vaddr_t va, sva, eva; |
vsize_t len; |
vsize_t len; |
paddr_t pa; |
paddr_t pa; |
Line 163 sodoloanfree(caddr_t buf, u_int size) |
|
Line 166 sodoloanfree(caddr_t buf, u_int size) |
|
len = eva - sva; |
len = eva - sva; |
npgs = len >> PAGE_SHIFT; |
npgs = len >> PAGE_SHIFT; |
|
|
pgs = alloca(npgs * sizeof(*pgs)); |
if (__predict_false(pgs == NULL)) { |
|
pgs = alloca(npgs * sizeof(*pgs)); |
|
|
for (i = 0, va = sva; va < eva; i++, va += PAGE_SIZE) { |
for (i = 0, va = sva; va < eva; i++, va += PAGE_SIZE) { |
if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) |
if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) |
panic("sodoloanfree: va 0x%lx not mapped", va); |
panic("sodoloanfree: va 0x%lx not mapped", va); |
pgs[i] = PHYS_TO_VM_PAGE(pa); |
pgs[i] = PHYS_TO_VM_PAGE(pa); |
|
} |
} |
} |
|
|
pmap_kremove(sva, len); |
pmap_kremove(sva, len); |
Line 197 sodopendfree(struct socket *so) |
|
Line 202 sodopendfree(struct socket *so) |
|
splx(s); |
splx(s); |
|
|
rv += m->m_ext.ext_size; |
rv += m->m_ext.ext_size; |
sodoloanfree(m->m_ext.ext_buf, m->m_ext.ext_size); |
sodoloanfree((m->m_flags & M_EXT_PAGES) ? |
|
m->m_ext.ext_pgs : NULL, m->m_ext.ext_buf, |
|
m->m_ext.ext_size); |
s = splvm(); |
s = splvm(); |
pool_cache_put(&mbpool_cache, m); |
pool_cache_put(&mbpool_cache, m); |
} |
} |
Line 210 sodopendfree(struct socket *so) |
|
Line 217 sodopendfree(struct socket *so) |
|
splx(s); |
splx(s); |
|
|
rv += m->m_ext.ext_size; |
rv += m->m_ext.ext_size; |
sodoloanfree(m->m_ext.ext_buf, m->m_ext.ext_size); |
sodoloanfree((m->m_flags & M_EXT_PAGES) ? |
|
m->m_ext.ext_pgs : NULL, m->m_ext.ext_buf, |
|
m->m_ext.ext_size); |
s = splvm(); |
s = splvm(); |
pool_cache_put(&mbpool_cache, m); |
pool_cache_put(&mbpool_cache, m); |
} |
} |
Line 220 sodopendfree(struct socket *so) |
|
Line 229 sodopendfree(struct socket *so) |
|
} |
} |
|
|
static void |
static void |
soloanfree(struct mbuf *m, caddr_t buf, u_int size, void *arg) |
soloanfree(struct mbuf *m, caddr_t buf, size_t size, void *arg) |
{ |
{ |
struct socket *so = arg; |
struct socket *so = arg; |
int s; |
int s; |
|
|
if (m == NULL) { |
if (m == NULL) { |
sodoloanfree(buf, size); |
sodoloanfree(NULL, buf, size); |
return; |
return; |
} |
} |
|
|
Line 244 sosend_loan(struct socket *so, struct ui |
|
Line 253 sosend_loan(struct socket *so, struct ui |
|
struct iovec *iov = uio->uio_iov; |
struct iovec *iov = uio->uio_iov; |
vaddr_t sva, eva; |
vaddr_t sva, eva; |
vsize_t len; |
vsize_t len; |
struct vm_page **pgs; |
|
vaddr_t lva, va; |
vaddr_t lva, va; |
int npgs, s, i, error; |
int npgs, s, i, error; |
|
|
Line 261 sosend_loan(struct socket *so, struct ui |
|
Line 269 sosend_loan(struct socket *so, struct ui |
|
len = eva - sva; |
len = eva - sva; |
npgs = len >> PAGE_SHIFT; |
npgs = len >> PAGE_SHIFT; |
|
|
|
/* XXX KDASSERT */ |
|
KASSERT(npgs <= M_EXT_MAXPAGES); |
|
|
while (socurkva + len > somaxkva) { |
while (socurkva + len > somaxkva) { |
if (sodopendfree(so)) |
if (sodopendfree(so)) |
continue; |
continue; |
Line 277 sosend_loan(struct socket *so, struct ui |
|
Line 288 sosend_loan(struct socket *so, struct ui |
|
return (0); |
return (0); |
socurkva += len; |
socurkva += len; |
|
|
pgs = alloca(npgs * sizeof(*pgs)); |
|
|
|
error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, sva, len, |
error = uvm_loan(&uio->uio_procp->p_vmspace->vm_map, sva, len, |
pgs, UVM_LOAN_TOPAGE); |
m->m_ext.ext_pgs, UVM_LOAN_TOPAGE); |
if (error) { |
if (error) { |
uvm_km_free(kernel_map, lva, len); |
uvm_km_free(kernel_map, lva, len); |
socurkva -= len; |
socurkva -= len; |
Line 288 sosend_loan(struct socket *so, struct ui |
|
Line 297 sosend_loan(struct socket *so, struct ui |
|
} |
} |
|
|
for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE) |
for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE) |
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pgs[i]), VM_PROT_READ); |
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]), |
|
VM_PROT_READ); |
pmap_update(pmap_kernel()); |
pmap_update(pmap_kernel()); |
|
|
lva += (vaddr_t) iov->iov_base & PAGE_MASK; |
lva += (vaddr_t) iov->iov_base & PAGE_MASK; |
|
|
MEXTADD(m, (caddr_t) lva, space, M_MBUF, soloanfree, so); |
MEXTADD(m, (caddr_t) lva, space, M_MBUF, soloanfree, so); |
|
m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP; |
|
|
uio->uio_resid -= space; |
uio->uio_resid -= space; |
/* uio_offset not updated, not set/used for write(2) */ |
/* uio_offset not updated, not set/used for write(2) */ |
Line 341 socreate(int dom, struct socket **aso, i |
|
Line 352 socreate(int dom, struct socket **aso, i |
|
so->so_proto = prp; |
so->so_proto = prp; |
so->so_send = sosend; |
so->so_send = sosend; |
so->so_receive = soreceive; |
so->so_receive = soreceive; |
|
#ifdef MBUFTRACE |
|
so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner; |
|
so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner; |
|
so->so_mowner = &prp->pr_domain->dom_mowner; |
|
#endif |
if (p != 0) |
if (p != 0) |
so->so_uid = p->p_ucred->cr_uid; |
so->so_uid = p->p_ucred->cr_uid; |
error = (*prp->pr_usrreq)(so, PRU_ATTACH, (struct mbuf *)0, |
error = (*prp->pr_usrreq)(so, PRU_ATTACH, (struct mbuf *)0, |
Line 677 sosend(struct socket *so, struct mbuf *a |
|
Line 693 sosend(struct socket *so, struct mbuf *a |
|
top->m_flags |= M_EOR; |
top->m_flags |= M_EOR; |
} else do { |
} else do { |
if (top == 0) { |
if (top == 0) { |
MGETHDR(m, M_WAIT, MT_DATA); |
m = m_gethdr(M_WAIT, MT_DATA); |
mlen = MHLEN; |
mlen = MHLEN; |
m->m_pkthdr.len = 0; |
m->m_pkthdr.len = 0; |
m->m_pkthdr.rcvif = (struct ifnet *)0; |
m->m_pkthdr.rcvif = (struct ifnet *)0; |
} else { |
} else { |
MGET(m, M_WAIT, MT_DATA); |
m = m_get(M_WAIT, MT_DATA); |
mlen = MLEN; |
mlen = MLEN; |
} |
} |
|
MCLAIM(m, so->so_snd.sb_mowner); |
if (use_sosend_loan && |
if (use_sosend_loan && |
uio->uio_iov->iov_len >= SOCK_LOAN_THRESH && |
uio->uio_iov->iov_len >= SOCK_LOAN_THRESH && |
space >= SOCK_LOAN_THRESH && |
space >= SOCK_LOAN_THRESH && |
Line 696 sosend(struct socket *so, struct mbuf *a |
|
Line 713 sosend(struct socket *so, struct mbuf *a |
|
} |
} |
if (resid >= MINCLSIZE && space >= MCLBYTES) { |
if (resid >= MINCLSIZE && space >= MCLBYTES) { |
SOSEND_COUNTER_INCR(&sosend_copy_big); |
SOSEND_COUNTER_INCR(&sosend_copy_big); |
MCLGET(m, M_WAIT); |
m_clget(m, M_WAIT); |
if ((m->m_flags & M_EXT) == 0) |
if ((m->m_flags & M_EXT) == 0) |
goto nopages; |
goto nopages; |
mlen = MCLBYTES; |
mlen = MCLBYTES; |
Line 894 soreceive(struct socket *so, struct mbuf |
|
Line 911 soreceive(struct socket *so, struct mbuf |
|
error = EWOULDBLOCK; |
error = EWOULDBLOCK; |
goto release; |
goto release; |
} |
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
splx(s); |
splx(s); |
Line 902 soreceive(struct socket *so, struct mbuf |
|
Line 921 soreceive(struct socket *so, struct mbuf |
|
goto restart; |
goto restart; |
} |
} |
dontblock: |
dontblock: |
|
/* |
|
* On entry here, m points to the first record of the socket buffer. |
|
* While we process the initial mbufs containing address and control |
|
* info, we save a copy of m->m_nextpkt into nextrecord. |
|
*/ |
#ifdef notyet /* XXXX */ |
#ifdef notyet /* XXXX */ |
if (uio->uio_procp) |
if (uio->uio_procp) |
uio->uio_procp->p_stats->p_ru.ru_msgrcv++; |
uio->uio_procp->p_stats->p_ru.ru_msgrcv++; |
#endif |
#endif |
|
KASSERT(m == so->so_rcv.sb_mb); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); |
nextrecord = m->m_nextpkt; |
nextrecord = m->m_nextpkt; |
if (pr->pr_flags & PR_ADDR) { |
if (pr->pr_flags & PR_ADDR) { |
#ifdef DIAGNOSTIC |
#ifdef DIAGNOSTIC |
Line 958 soreceive(struct socket *so, struct mbuf |
|
Line 985 soreceive(struct socket *so, struct mbuf |
|
controlp = &(*controlp)->m_next; |
controlp = &(*controlp)->m_next; |
} |
} |
} |
} |
|
|
|
/* |
|
* If m is non-NULL, we have some data to read. From now on, |
|
* make sure to keep sb_lastrecord consistent when working on |
|
* the last packet on the chain (nextrecord == NULL) and we |
|
* change m->m_nextpkt. |
|
*/ |
if (m) { |
if (m) { |
if ((flags & MSG_PEEK) == 0) |
if ((flags & MSG_PEEK) == 0) { |
m->m_nextpkt = nextrecord; |
m->m_nextpkt = nextrecord; |
|
/* |
|
* If nextrecord == NULL (this is a single chain), |
|
* then sb_lastrecord may not be valid here if m |
|
* was changed earlier. |
|
*/ |
|
if (nextrecord == NULL) { |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
so->so_rcv.sb_lastrecord = m; |
|
} |
|
} |
type = m->m_type; |
type = m->m_type; |
if (type == MT_OOBDATA) |
if (type == MT_OOBDATA) |
flags |= MSG_OOB; |
flags |= MSG_OOB; |
|
} else { |
|
if ((flags & MSG_PEEK) == 0) { |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
so->so_rcv.sb_mb = nextrecord; |
|
SB_EMPTY_FIXUP(&so->so_rcv); |
|
} |
} |
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); |
|
|
moff = 0; |
moff = 0; |
offset = 0; |
offset = 0; |
while (m && uio->uio_resid > 0 && error == 0) { |
while (m && uio->uio_resid > 0 && error == 0) { |
Line 992 soreceive(struct socket *so, struct mbuf |
|
Line 1045 soreceive(struct socket *so, struct mbuf |
|
* block interrupts again. |
* block interrupts again. |
*/ |
*/ |
if (mp == 0) { |
if (mp == 0) { |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); |
splx(s); |
splx(s); |
error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); |
error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); |
s = splsoftnet(); |
s = splsoftnet(); |
Line 1033 soreceive(struct socket *so, struct mbuf |
|
Line 1088 soreceive(struct socket *so, struct mbuf |
|
MFREE(m, so->so_rcv.sb_mb); |
MFREE(m, so->so_rcv.sb_mb); |
m = so->so_rcv.sb_mb; |
m = so->so_rcv.sb_mb; |
} |
} |
if (m) |
/* |
|
* If m != NULL, we also know that |
|
* so->so_rcv.sb_mb != NULL. |
|
*/ |
|
KASSERT(so->so_rcv.sb_mb == m); |
|
if (m) { |
m->m_nextpkt = nextrecord; |
m->m_nextpkt = nextrecord; |
|
if (nextrecord == NULL) |
|
so->so_rcv.sb_lastrecord = m; |
|
} else { |
|
so->so_rcv.sb_mb = nextrecord; |
|
SB_EMPTY_FIXUP(&so->so_rcv); |
|
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); |
} |
} |
} else { |
} else { |
if (flags & MSG_PEEK) |
if (flags & MSG_PEEK) |
Line 1090 soreceive(struct socket *so, struct mbuf |
|
Line 1158 soreceive(struct socket *so, struct mbuf |
|
(struct mbuf *)(long)flags, |
(struct mbuf *)(long)flags, |
(struct mbuf *)0, |
(struct mbuf *)0, |
(struct proc *)0); |
(struct proc *)0); |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); |
error = sbwait(&so->so_rcv); |
error = sbwait(&so->so_rcv); |
if (error) { |
if (error) { |
sbunlock(&so->so_rcv); |
sbunlock(&so->so_rcv); |
Line 1107 soreceive(struct socket *so, struct mbuf |
|
Line 1177 soreceive(struct socket *so, struct mbuf |
|
(void) sbdroprecord(&so->so_rcv); |
(void) sbdroprecord(&so->so_rcv); |
} |
} |
if ((flags & MSG_PEEK) == 0) { |
if ((flags & MSG_PEEK) == 0) { |
if (m == 0) |
if (m == 0) { |
|
/* |
|
* First part is an inline SB_EMPTY_FIXUP(). Second |
|
* part makes sure sb_lastrecord is up-to-date if |
|
* there is still data in the socket buffer. |
|
*/ |
so->so_rcv.sb_mb = nextrecord; |
so->so_rcv.sb_mb = nextrecord; |
|
if (so->so_rcv.sb_mb == NULL) { |
|
so->so_rcv.sb_mbtail = NULL; |
|
so->so_rcv.sb_lastrecord = NULL; |
|
} else if (nextrecord->m_nextpkt == NULL) |
|
so->so_rcv.sb_lastrecord = nextrecord; |
|
} |
|
SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); |
|
SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); |
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) |
if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0, |
(struct mbuf *)(long)flags, (struct mbuf *)0, |
(struct mbuf *)(long)flags, (struct mbuf *)0, |
Line 1274 sosetopt(struct socket *so, int level, i |
|
Line 1357 sosetopt(struct socket *so, int level, i |
|
goto bad; |
goto bad; |
} |
} |
tv = mtod(m, struct timeval *); |
tv = mtod(m, struct timeval *); |
if (tv->tv_sec * hz + tv->tv_usec / tick > SHRT_MAX) { |
if (tv->tv_sec > (SHRT_MAX - tv->tv_usec / tick) / hz) { |
error = EDOM; |
error = EDOM; |
goto bad; |
goto bad; |
} |
} |
val = tv->tv_sec * hz + tv->tv_usec / tick; |
val = tv->tv_sec * hz + tv->tv_usec / tick; |
|
if (val == 0 && tv->tv_usec != 0) |
|
val = 1; |
|
|
switch (optname) { |
switch (optname) { |
|
|
Line 1402 sohasoutofband(struct socket *so) |
|
Line 1487 sohasoutofband(struct socket *so) |
|
psignal(p, SIGURG); |
psignal(p, SIGURG); |
selwakeup(&so->so_rcv.sb_sel); |
selwakeup(&so->so_rcv.sb_sel); |
} |
} |
|
|
|
static void |
|
filt_sordetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) |
|
so->so_rcv.sb_flags &= ~SB_KNOTE; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_soread(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
kn->kn_data = so->so_rcv.sb_cc; |
|
if (so->so_state & SS_CANTRCVMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
return (1); |
|
} |
|
if (so->so_error) /* temporary udp error */ |
|
return (1); |
|
if (kn->kn_sfflags & NOTE_LOWAT) |
|
return (kn->kn_data >= kn->kn_sdata); |
|
return (kn->kn_data >= so->so_rcv.sb_lowat); |
|
} |
|
|
|
static void |
|
filt_sowdetach(struct knote *kn) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); |
|
if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) |
|
so->so_snd.sb_flags &= ~SB_KNOTE; |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_sowrite(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
kn->kn_data = sbspace(&so->so_snd); |
|
if (so->so_state & SS_CANTSENDMORE) { |
|
kn->kn_flags |= EV_EOF; |
|
kn->kn_fflags = so->so_error; |
|
return (1); |
|
} |
|
if (so->so_error) /* temporary udp error */ |
|
return (1); |
|
if (((so->so_state & SS_ISCONNECTED) == 0) && |
|
(so->so_proto->pr_flags & PR_CONNREQUIRED)) |
|
return (0); |
|
if (kn->kn_sfflags & NOTE_LOWAT) |
|
return (kn->kn_data >= kn->kn_sdata); |
|
return (kn->kn_data >= so->so_snd.sb_lowat); |
|
} |
|
|
|
/*ARGSUSED*/ |
|
static int |
|
filt_solisten(struct knote *kn, long hint) |
|
{ |
|
struct socket *so; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
|
|
/* |
|
* Set kn_data to number of incoming connections, not |
|
* counting partial (incomplete) connections. |
|
*/ |
|
kn->kn_data = so->so_qlen; |
|
return (kn->kn_data > 0); |
|
} |
|
|
|
/*
 * Filter-operation tables for socket kqueue events.  The leading 1
 * marks the filters as file-descriptor backed; the attach slot is
 * NULL because soo_kqfilter() performs attachment itself.  Note the
 * listen and read filters share filt_sordetach, since both hang off
 * the receive buffer's klist.
 */
static const struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static const struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static const struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };
|
|
|
int |
|
soo_kqfilter(struct file *fp, struct knote *kn) |
|
{ |
|
struct socket *so; |
|
struct sockbuf *sb; |
|
|
|
so = (struct socket *)kn->kn_fp->f_data; |
|
switch (kn->kn_filter) { |
|
case EVFILT_READ: |
|
if (so->so_options & SO_ACCEPTCONN) |
|
kn->kn_fop = &solisten_filtops; |
|
else |
|
kn->kn_fop = &soread_filtops; |
|
sb = &so->so_rcv; |
|
break; |
|
case EVFILT_WRITE: |
|
kn->kn_fop = &sowrite_filtops; |
|
sb = &so->so_snd; |
|
break; |
|
default: |
|
return (1); |
|
} |
|
SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext); |
|
sb->sb_flags |= SB_KNOTE; |
|
return (0); |
|
} |
|
|