version 1.72, 1998/10/08 01:19:25 |
version 1.75, 1998/12/18 21:35:11 |
Line 164 struct ifqueue ipintrq; |
|
Line 164 struct ifqueue ipintrq; |
|
struct ipstat ipstat; |
struct ipstat ipstat; |
u_int16_t ip_id; |
u_int16_t ip_id; |
int ip_defttl; |
int ip_defttl; |
|
|
struct ipqhead ipq; |
struct ipqhead ipq; |
|
int ipq_locked;		/* nonzero while the reassembly queue (ipq) is held;
			 * set/cleared only at splimp via ipq_lock_try()/ipq_unlock() */
|
|
|
/* Try to take the reassembly-queue lock without blocking; 1 on success. */
static __inline int ipq_lock_try __P((void));

/* Release the reassembly-queue lock. */
static __inline void ipq_unlock __P((void));
|
|
|
static __inline int |
|
ipq_lock_try() |
|
{ |
|
int s; |
|
|
|
s = splimp(); |
|
if (ipq_locked) { |
|
splx(s); |
|
return (0); |
|
} |
|
ipq_locked = 1; |
|
splx(s); |
|
return (1); |
|
} |
|
|
|
static __inline void |
|
ipq_unlock() |
|
{ |
|
int s; |
|
|
|
s = splimp(); |
|
ipq_locked = 0; |
|
splx(s); |
|
} |
|
|
|
#ifdef DIAGNOSTIC
/*
 * Diagnostic variants: taking the lock while it is already held, or
 * asserting it while it is not held, is a fatal coding error.
 * NOTE: the blank lines that the extraction interleaved between the
 * backslash-continued lines would have terminated each #define early;
 * the macros below are reassembled as single continued definitions.
 */
#define	IPQ_LOCK()							\
do {									\
	if (ipq_lock_try() == 0) {					\
		printf("%s:%d: ipq already locked\n", __FILE__, __LINE__); \
		panic("ipq_lock");					\
	}								\
} while (0)

#define	IPQ_LOCK_CHECK()						\
do {									\
	if (ipq_locked == 0) {						\
		printf("%s:%d: ipq lock not held\n", __FILE__, __LINE__); \
		panic("ipq lock check");				\
	}								\
} while (0)
#else
/* Non-diagnostic kernels just take the lock and never re-check it. */
#define	IPQ_LOCK()		(void) ipq_lock_try()
#define	IPQ_LOCK_CHECK()	/* nothing */
#endif

#define	IPQ_UNLOCK()		ipq_unlock()
|
|
struct pool ipqent_pool; |
struct pool ipqent_pool; |
|
|
|
|
if (ip_mtudisc != 0) |
if (ip_mtudisc != 0) |
ip_mtudisc_timeout_q = |
ip_mtudisc_timeout_q = |
rt_timer_queue_create(ip_mtudisc_timeout); |
rt_timer_queue_create(ip_mtudisc_timeout); |
|
#ifdef GATEWAY |
|
ipflow_init(); |
|
#endif |
} |
} |
|
|
struct sockaddr_in ipaddr = { sizeof(ipaddr), AF_INET }; |
struct sockaddr_in ipaddr = { sizeof(ipaddr), AF_INET }; |
|
|
* Look for queue of fragments |
* Look for queue of fragments |
* of this datagram. |
* of this datagram. |
*/ |
*/ |
|
IPQ_LOCK(); |
for (fp = ipq.lh_first; fp != NULL; fp = fp->ipq_q.le_next) |
for (fp = ipq.lh_first; fp != NULL; fp = fp->ipq_q.le_next) |
if (ip->ip_id == fp->ipq_id && |
if (ip->ip_id == fp->ipq_id && |
in_hosteq(ip->ip_src, fp->ipq_src) && |
in_hosteq(ip->ip_src, fp->ipq_src) && |
|
|
*/ |
*/ |
if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) { |
if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) { |
ipstat.ips_badfrags++; |
ipstat.ips_badfrags++; |
|
IPQ_UNLOCK(); |
goto bad; |
goto bad; |
} |
} |
} |
} |
|
|
ipqe = pool_get(&ipqent_pool, PR_NOWAIT); |
ipqe = pool_get(&ipqent_pool, PR_NOWAIT); |
if (ipqe == NULL) { |
if (ipqe == NULL) { |
ipstat.ips_rcvmemdrop++; |
ipstat.ips_rcvmemdrop++; |
|
IPQ_UNLOCK(); |
goto bad; |
goto bad; |
} |
} |
ipqe->ipqe_mff = mff; |
ipqe->ipqe_mff = mff; |
ipqe->ipqe_m = m; |
ipqe->ipqe_m = m; |
ipqe->ipqe_ip = ip; |
ipqe->ipqe_ip = ip; |
m = ip_reass(ipqe, fp); |
m = ip_reass(ipqe, fp); |
if (m == 0) |
if (m == 0) { |
|
IPQ_UNLOCK(); |
goto next; |
goto next; |
|
} |
ipstat.ips_reassembled++; |
ipstat.ips_reassembled++; |
ip = mtod(m, struct ip *); |
ip = mtod(m, struct ip *); |
|
hlen = ip->ip_hl << 2; |
} else |
} else |
if (fp) |
if (fp) |
ip_freef(fp); |
ip_freef(fp); |
|
IPQ_UNLOCK(); |
} else |
} else |
ip->ip_len -= hlen; |
ip->ip_len -= hlen; |
|
|
Line 546 ip_reass(ipqe, fp) |
|
Line 608 ip_reass(ipqe, fp) |
|
int hlen = ipqe->ipqe_ip->ip_hl << 2; |
int hlen = ipqe->ipqe_ip->ip_hl << 2; |
int i, next; |
int i, next; |
|
|
|
IPQ_LOCK_CHECK(); |
|
|
/* |
/* |
* Presence of header sizes in mbufs |
* Presence of header sizes in mbufs |
* would confuse code below. |
* would confuse code below. |
|
|
{ |
{ |
register struct ipqent *q, *p; |
register struct ipqent *q, *p; |
|
|
|
IPQ_LOCK_CHECK(); |
|
|
for (q = fp->ipq_fragq.lh_first; q != NULL; q = p) { |
for (q = fp->ipq_fragq.lh_first; q != NULL; q = p) { |
p = q->ipqe_q.le_next; |
p = q->ipqe_q.le_next; |
m_freem(q->ipqe_m); |
m_freem(q->ipqe_m); |
|
|
register struct ipq *fp, *nfp; |
register struct ipq *fp, *nfp; |
int s = splsoftnet(); |
int s = splsoftnet(); |
|
|
|
IPQ_LOCK(); |
for (fp = ipq.lh_first; fp != NULL; fp = nfp) { |
for (fp = ipq.lh_first; fp != NULL; fp = nfp) { |
nfp = fp->ipq_q.le_next; |
nfp = fp->ipq_q.le_next; |
if (--fp->ipq_ttl == 0) { |
if (--fp->ipq_ttl == 0) { |
|
|
ip_freef(fp); |
ip_freef(fp); |
} |
} |
} |
} |
|
IPQ_UNLOCK(); |
#ifdef GATEWAY |
#ifdef GATEWAY |
ipflow_slowtimo(); |
ipflow_slowtimo(); |
#endif |
#endif |
|
|
ip_drain() |
ip_drain() |
{ |
{ |
|
|
|
/* |
|
* We may be called from a device's interrupt context. If |
|
* the ipq is already busy, just bail out now. |
|
*/ |
|
if (ipq_lock_try() == 0) |
|
return; |
|
|
while (ipq.lh_first != NULL) { |
while (ipq.lh_first != NULL) { |
ipstat.ips_fragdropped++; |
ipstat.ips_fragdropped++; |
ip_freef(ipq.lh_first); |
ip_freef(ipq.lh_first); |
} |
} |
|
|
|
IPQ_UNLOCK(); |
} |
} |
|
|
/* |
/* |