/*
 * Recovered from a side-by-side CVS diff between revision 1.94
 * (2004/03/17 10:21:59) and revision 1.96 (2004/03/21 00:54:46).
 */
Line 162 int sokvawaiters; |
|
Line 162 int sokvawaiters; |
|
|
|
static size_t sodopendfree(struct socket *); |
static size_t sodopendfree(struct socket *); |
static size_t sodopendfreel(struct socket *); |
static size_t sodopendfreel(struct socket *); |
|
static __inline void sokvareserve(struct socket *, vsize_t); |
|
static __inline void sokvaunreserve(vsize_t); |
|
|
/* |
static __inline void |
* sokvaalloc: allocate kva for loan. |
sokvareserve(struct socket *so, vsize_t len) |
*/ |
|
|
|
vaddr_t |
|
sokvaalloc(vsize_t len, struct socket *so) |
|
{ |
{ |
vaddr_t lva; |
|
int s; |
int s; |
|
|
/* |
|
* reserve kva. |
|
*/ |
|
|
|
s = splvm(); |
s = splvm(); |
simple_lock(&so_pendfree_slock); |
simple_lock(&so_pendfree_slock); |
while (socurkva + len > somaxkva) { |
while (socurkva + len > somaxkva) { |
Line 203 sokvaalloc(vsize_t len, struct socket *s |
|
Line 196 sokvaalloc(vsize_t len, struct socket *s |
|
socurkva += len; |
socurkva += len; |
simple_unlock(&so_pendfree_slock); |
simple_unlock(&so_pendfree_slock); |
splx(s); |
splx(s); |
|
} |
|
|
|
static __inline void |
|
sokvaunreserve(vsize_t len) |
|
{ |
|
int s; |
|
|
|
s = splvm(); |
|
simple_lock(&so_pendfree_slock); |
|
socurkva -= len; |
|
if (sokvawaiters) |
|
wakeup(&socurkva); |
|
simple_unlock(&so_pendfree_slock); |
|
splx(s); |
|
} |
|
|
|
/* |
|
* sokvaalloc: allocate kva for loan. |
|
*/ |
|
|
|
vaddr_t |
|
sokvaalloc(vsize_t len, struct socket *so) |
|
{ |
|
vaddr_t lva; |
|
|
|
/* |
|
* reserve kva. |
|
*/ |
|
|
|
sokvareserve(so, len); |
|
|
/* |
/* |
* allocate kva. |
* allocate kva. |
*/ |
*/ |
|
|
lva = uvm_km_valloc_wait(kernel_map, len); |
lva = uvm_km_valloc_wait(kernel_map, len); |
if (lva == 0) |
if (lva == 0) { |
|
sokvaunreserve(len); |
return (0); |
return (0); |
|
} |
|
|
return lva; |
return lva; |
} |
} |
Line 222 sokvaalloc(vsize_t len, struct socket *s |
|
Line 247 sokvaalloc(vsize_t len, struct socket *s |
|
void |
void |
sokvafree(vaddr_t sva, vsize_t len) |
sokvafree(vaddr_t sva, vsize_t len) |
{ |
{ |
int s; |
|
|
|
/* |
/* |
* free kva. |
* free kva. |
Line 234 sokvafree(vaddr_t sva, vsize_t len) |
|
Line 258 sokvafree(vaddr_t sva, vsize_t len) |
|
* unreserve kva. |
* unreserve kva. |
*/ |
*/ |
|
|
s = splvm(); |
sokvaunreserve(len); |
simple_lock(&so_pendfree_slock); |
|
socurkva -= len; |
|
if (sokvawaiters) |
|
wakeup(&socurkva); |
|
simple_unlock(&so_pendfree_slock); |
|
splx(s); |
|
} |
} |
|
|
static void |
static void |
Line 765 sosend(struct socket *so, struct mbuf *a |
|
Line 783 sosend(struct socket *so, struct mbuf *a |
|
if ((atomic && resid > so->so_snd.sb_hiwat) || |
if ((atomic && resid > so->so_snd.sb_hiwat) || |
clen > so->so_snd.sb_hiwat) |
clen > so->so_snd.sb_hiwat) |
snderr(EMSGSIZE); |
snderr(EMSGSIZE); |
if (space < resid + clen && uio && |
if (space < resid + clen && |
(atomic || space < so->so_snd.sb_lowat || space < clen)) { |
(atomic || space < so->so_snd.sb_lowat || space < clen)) { |
if (so->so_state & SS_NBIO) |
if (so->so_state & SS_NBIO) |
snderr(EWOULDBLOCK); |
snderr(EWOULDBLOCK); |