Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/netinet/ip_input.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/netinet/ip_input.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.233 retrieving revision 1.296.2.4 diff -u -p -r1.233 -r1.296.2.4 --- src/sys/netinet/ip_input.c 2006/10/05 17:35:19 1.233 +++ src/sys/netinet/ip_input.c 2014/05/22 11:41:09 1.296.2.4 @@ -1,4 +1,4 @@ -/* $NetBSD: ip_input.c,v 1.233 2006/10/05 17:35:19 tls Exp $ */ +/* $NetBSD: ip_input.c,v 1.296.2.4 2014/05/22 11:41:09 yamt Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. @@ -45,13 +45,6 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the NetBSD - * Foundation, Inc. and its contributors. - * 4. Neither the name of The NetBSD Foundation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED @@ -98,11 +91,11 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: ip_input.c,v 1.233 2006/10/05 17:35:19 tls Exp $"); +__KERNEL_RCSID(0, "$NetBSD: ip_input.c,v 1.296.2.4 2014/05/22 11:41:09 yamt Exp $"); #include "opt_inet.h" +#include "opt_compat_netbsd.h" #include "opt_gateway.h" -#include "opt_pfil_hooks.h" #include "opt_ipsec.h" #include "opt_mrouting.h" #include "opt_mbuftrace.h" @@ -110,7 +103,6 @@ __KERNEL_RCSID(0, "$NetBSD: ip_input.c,v #include #include -#include #include #include #include @@ -135,6 +127,7 @@ __KERNEL_RCSID(0, "$NetBSD: ip_input.c,v #include #include #include +#include #include /* just for gif_ttl */ #include @@ -145,15 +138,11 @@ __KERNEL_RCSID(0, "$NetBSD: ip_input.c,v #ifdef MROUTING #include #endif +#include #ifdef IPSEC -#include -#include -#endif -#ifdef FAST_IPSEC #include -#include -#endif /* FAST_IPSEC*/ +#endif #ifndef IPFORWARDING #ifdef GATEWAY @@ -178,6 +167,11 @@ __KERNEL_RCSID(0, "$NetBSD: ip_input.c,v #define IPMTUDISCTIMEOUT (10 * 60) /* as per RFC 1191 */ #endif +#ifdef COMPAT_50 +#include +#include +#endif + /* * Note: DIRECTED_BROADCAST is handled this way so that previous * configuration using this option will Just Work. @@ -221,7 +215,6 @@ int ip_checkinterface = 0; struct rttimer_queue *ip_mtudisc_timeout_q = NULL; -int ipqmaxlen = IFQ_MAXLEN; u_long in_ifaddrhash; /* size of hash table - 1 */ int in_ifaddrentries; /* total number of addrs */ struct in_ifaddrhead in_ifaddrhead; @@ -230,112 +223,15 @@ u_long in_multihash; /* size of hash int in_multientries; /* total number of addrs */ struct in_multihashhead *in_multihashtbl; struct ifqueue ipintrq; -struct ipstat ipstat; -uint16_t ip_id; - -#ifdef PFIL_HOOKS -struct pfil_head inet_pfil_hook; -#endif - -/* - * Cached copy of nmbclusters. If nbclusters is different, - * recalculate IP parameters derived from nmbclusters. 
- */ -static int ip_nmbclusters; /* copy of nmbclusters */ -static void ip_nmbclusters_changed(void); /* recalc limits */ - -#define CHECK_NMBCLUSTER_PARAMS() \ -do { \ - if (__predict_false(ip_nmbclusters != nmbclusters)) \ - ip_nmbclusters_changed(); \ -} while (/*CONSTCOND*/0) - -/* IP datagram reassembly queues (hashed) */ -#define IPREASS_NHASH_LOG2 6 -#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) -#define IPREASS_HMASK (IPREASS_NHASH - 1) -#define IPREASS_HASH(x,y) \ - (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK) -struct ipqhead ipq[IPREASS_NHASH]; -int ipq_locked; -static int ip_nfragpackets; /* packets in reass queue */ -static int ip_nfrags; /* total fragments in reass queues */ -int ip_maxfragpackets = 200; /* limit on packets. XXX sysctl */ -int ip_maxfrags; /* limit on fragments. XXX sysctl */ - - -/* - * Additive-Increase/Multiplicative-Decrease (AIMD) strategy for - * IP reassembly queue buffer managment. - * - * We keep a count of total IP fragments (NB: not fragmented packets!) - * awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments. - * If ip_nfrags exceeds ip_maxfrags the limit, we drop half the - * total fragments in reassembly queues.This AIMD policy avoids - * repeatedly deleting single packets under heavy fragmentation load - * (e.g., from lossy NFS peers). - */ -static u_int ip_reass_ttl_decr(u_int ticks); -static void ip_reass_drophalf(void); - - -static inline int ipq_lock_try(void); -static inline void ipq_unlock(void); - -static inline int -ipq_lock_try(void) -{ - int s; - - /* - * Use splvm() -- we're blocking things that would cause - * mbuf allocation. - */ - s = splvm(); - if (ipq_locked) { - splx(s); - return (0); - } - ipq_locked = 1; - splx(s); - return (1); -} - -static inline void -ipq_unlock(void) -{ - int s; - - s = splvm(); - ipq_locked = 0; - splx(s); -} +ipid_state_t * ip_ids; +uint16_t ip_id; -#ifdef DIAGNOSTIC -#define IPQ_LOCK() \ -do { \ - if (ipq_lock_try() == 0) { \ - printf("%s:%d: ipq already locked\n", __FILE__, __LINE__); \ - panic("ipq_lock"); \ - } \ -} while (/*CONSTCOND*/ 0) -#define IPQ_LOCK_CHECK() \ -do { \ - if (ipq_locked == 0) { \ - printf("%s:%d: ipq lock not held\n", __FILE__, __LINE__); \ - panic("ipq lock check"); \ - } \ -} while (/*CONSTCOND*/ 0) -#else -#define IPQ_LOCK() (void) ipq_lock_try() -#define IPQ_LOCK_CHECK() /* nothing */ -#endif +percpu_t *ipstat_percpu; -#define IPQ_UNLOCK() ipq_unlock() +pfil_head_t *inet_pfil_hook; -POOL_INIT(inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl", NULL); -POOL_INIT(ipqent_pool, sizeof(struct ipqent), 0, 0, 0, "ipqepl", NULL); +struct pool inmulti_pool; #ifdef INET_CSUM_COUNTERS #include @@ -374,22 +270,16 @@ static struct ip_srcrt { struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)]; } ip_srcrt; +static int ip_drainwanted; + static void save_rte(u_char *, struct in_addr); #ifdef MBUFTRACE -struct mowner ip_rx_mowner = { "internet", "rx" }; -struct mowner ip_tx_mowner = { "internet", "tx" }; +struct mowner ip_rx_mowner = MOWNER_INIT("internet", "rx"); +struct mowner ip_tx_mowner = MOWNER_INIT("internet", "tx"); #endif -/* - * Compute IP limits derived from the value of nmbclusters. - */ -static void -ip_nmbclusters_changed(void) -{ - ip_maxfrags = nmbclusters / 4; - ip_nmbclusters = nmbclusters; -} +static void sysctl_net_inet_ip_setup(struct sysctllog **); /* * IP initialization: fill in IP protocol switch table. 
@@ -401,6 +291,11 @@ ip_init(void) const struct protosw *pr; int i; + sysctl_net_inet_ip_setup(NULL); + + pool_init(&inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl", + NULL, IPL_SOFTNET); + pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); if (pr == 0) panic("ip_init"); @@ -412,38 +307,33 @@ ip_init(void) pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) ip_protox[pr->pr_protocol] = pr - inetsw; - for (i = 0; i < IPREASS_NHASH; i++) - LIST_INIT(&ipq[i]); + ip_reass_init(); + ip_ids = ip_id_init(); ip_id = time_second & 0xfffff; - ipintrq.ifq_maxlen = ipqmaxlen; - ip_nmbclusters_changed(); + ipintrq.ifq_maxlen = IFQ_MAXLEN; TAILQ_INIT(&in_ifaddrhead); - in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, M_IFADDR, - M_WAITOK, &in_ifaddrhash); - in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, M_IPMADDR, - M_WAITOK, &in_multihash); + in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true, + &in_ifaddrhash); + in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true, + &in_multihash); ip_mtudisc_timeout_q = rt_timer_queue_create(ip_mtudisc_timeout); #ifdef GATEWAY - ipflow_init(); + ipflow_init(ip_hashsize); #endif -#ifdef PFIL_HOOKS /* Register our Packet Filter hook. */ - inet_pfil_hook.ph_type = PFIL_TYPE_AF; - inet_pfil_hook.ph_af = AF_INET; - i = pfil_head_register(&inet_pfil_hook); - if (i != 0) - printf("ip_init: WARNING: unable to register pfil hook, " - "error %d\n", i); -#endif /* PFIL_HOOKS */ + inet_pfil_hook = pfil_head_create(PFIL_TYPE_AF, (void *)AF_INET); + KASSERT(inet_pfil_hook != NULL); #ifdef MBUFTRACE MOWNER_ATTACH(&ip_tx_mowner); MOWNER_ATTACH(&ip_rx_mowner); #endif /* MBUFTRACE */ + + ipstat_percpu = percpu_alloc(sizeof(uint64_t) * IP_NSTATS); } struct sockaddr_in ipaddr = { @@ -460,16 +350,35 @@ ipintr(void) { int s; struct mbuf *m; + struct ifqueue lcl_intrq; + + memset(&lcl_intrq, 0, sizeof(lcl_intrq)); - while (1) { + mutex_enter(softnet_lock); + KERNEL_LOCK(1, NULL); + if (!IF_IS_EMPTY(&ipintrq)) { s = splnet(); - IF_DEQUEUE(&ipintrq, m); + + /* Take existing queue onto stack */ + lcl_intrq = ipintrq; + + /* Zero out global queue, preserving maxlen and drops */ + ipintrq.ifq_head = NULL; + ipintrq.ifq_tail = NULL; + ipintrq.ifq_len = 0; + ipintrq.ifq_maxlen = lcl_intrq.ifq_maxlen; + ipintrq.ifq_drops = lcl_intrq.ifq_drops; + splx(s); - if (m == 0) - return; - MCLAIM(m, &ip_rx_mowner); + } + KERNEL_UNLOCK_ONE(NULL); + while (!IF_IS_EMPTY(&lcl_intrq)) { + IF_DEQUEUE(&lcl_intrq, m); + if (m == NULL) + break; ip_input(m); } + mutex_exit(softnet_lock); } /* @@ -480,28 +389,15 @@ void ip_input(struct mbuf *m) { struct ip *ip = NULL; - struct ipq *fp; struct in_ifaddr *ia; struct ifaddr *ifa; - struct ipqent *ipqe; - int hlen = 0, mff, len; + int hlen = 0, len; int downmatch; int checkif; int srcrt = 0; - int s; - u_int hash; -#ifdef FAST_IPSEC - struct m_tag *mtag; - struct tdb_ident *tdbi; - struct secpolicy *sp; - int error; -#endif /* FAST_IPSEC */ MCLAIM(m, &ip_rx_mowner); -#ifdef DIAGNOSTIC - if ((m->m_flags & M_PKTHDR) == 0) - panic("ipintr no HDR"); -#endif + KASSERT((m->m_flags & M_PKTHDR) != 0); /* * If no IP addresses have been set yet but the interfaces @@ -509,39 +405,39 @@ ip_input(struct mbuf *m) */ if (TAILQ_FIRST(&in_ifaddrhead) == 0) goto bad; - ipstat.ips_total++; + IP_STATINC(IP_STAT_TOTAL); /* * If the IP header is not aligned, slurp it up into a new * mbuf with space for link headers, in the event we forward * it. 
Otherwise, if it is aligned, make sure the entire * base IP header is in the first mbuf of the chain. */ - if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { + if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) { if ((m = m_copyup(m, sizeof(struct ip), (max_linkhdr + 3) & ~3)) == NULL) { /* XXXJRT new stat, please */ - ipstat.ips_toosmall++; + IP_STATINC(IP_STAT_TOOSMALL); return; } } else if (__predict_false(m->m_len < sizeof (struct ip))) { if ((m = m_pullup(m, sizeof (struct ip))) == NULL) { - ipstat.ips_toosmall++; + IP_STATINC(IP_STAT_TOOSMALL); return; } } ip = mtod(m, struct ip *); if (ip->ip_v != IPVERSION) { - ipstat.ips_badvers++; + IP_STATINC(IP_STAT_BADVERS); goto bad; } hlen = ip->ip_hl << 2; if (hlen < sizeof(struct ip)) { /* minimum header length */ - ipstat.ips_badhlen++; + IP_STATINC(IP_STAT_BADHLEN); goto bad; } if (hlen > m->m_len) { - if ((m = m_pullup(m, hlen)) == 0) { - ipstat.ips_badhlen++; + if ((m = m_pullup(m, hlen)) == NULL) { + IP_STATINC(IP_STAT_BADHLEN); return; } ip = mtod(m, struct ip *); @@ -552,7 +448,7 @@ ip_input(struct mbuf *m) * not allowed. */ if (IN_MULTICAST(ip->ip_src.s_addr)) { - ipstat.ips_badaddr++; + IP_STATINC(IP_STAT_BADADDR); goto bad; } @@ -560,7 +456,7 @@ ip_input(struct mbuf *m) if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET || (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) { if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) { - ipstat.ips_badaddr++; + IP_STATINC(IP_STAT_BADADDR); goto bad; } } @@ -598,7 +494,7 @@ ip_input(struct mbuf *m) * Check for additional length bogosity */ if (len < hlen) { - ipstat.ips_badlen++; + IP_STATINC(IP_STAT_BADLEN); goto bad; } @@ -609,7 +505,7 @@ ip_input(struct mbuf *m) * Drop packet if shorter than we expect. */ if (m->m_pkthdr.len < len) { - ipstat.ips_tooshort++; + IP_STATINC(IP_STAT_TOOSHORT); goto bad; } if (m->m_pkthdr.len > len) { @@ -620,18 +516,12 @@ ip_input(struct mbuf *m) m_adj(m, len - m->m_pkthdr.len); } -#if defined(IPSEC) - /* ipflow (IP fast forwarding) is not compatible with IPsec. */ - m->m_flags &= ~M_CANFASTFWD; -#else /* * Assume that we can create a fast-forward IP flow entry * based on this packet. */ m->m_flags |= M_CANFASTFWD; -#endif -#ifdef PFIL_HOOKS /* * Run through list of hooks for input packets. If there are any * filters which require that additional packets in the flow are @@ -643,9 +533,7 @@ ip_input(struct mbuf *m) * let ipfilter look at packet on the wire, * not the decapsulated packet. */ -#ifdef IPSEC - if (!ipsec_getnhist(m)) -#elif defined(FAST_IPSEC) +#if defined(IPSEC) if (!ipsec_indone(m)) #else if (1) @@ -654,7 +542,7 @@ ip_input(struct mbuf *m) struct in_addr odst; odst = ip->ip_dst; - if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif, + if (pfil_run_hooks(inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN) != 0) return; if (m == NULL) @@ -677,7 +565,6 @@ ip_input(struct mbuf *m) */ srcrt = (odst.s_addr != ip->ip_dst.s_addr); } -#endif /* PFIL_HOOKS */ #ifdef ALTQ /* XXX Temporary until ALTQ is changed to use a pfil hook */ @@ -778,7 +665,7 @@ ip_input(struct mbuf *m) * ip_output().) 
*/ if (ip_mforward(m, m->m_pkthdr.rcvif) != 0) { - ipstat.ips_cantforward++; + IP_STATINC(IP_STAT_CANTFORWARD); m_freem(m); return; } @@ -790,7 +677,7 @@ ip_input(struct mbuf *m) */ if (ip->ip_p == IPPROTO_IGMP) goto ours; - ipstat.ips_forward++; + IP_STATINC(IP_STAT_CANTFORWARD); } #endif /* @@ -799,7 +686,7 @@ ip_input(struct mbuf *m) */ IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm); if (inm == NULL) { - ipstat.ips_cantforward++; + IP_STATINC(IP_STAT_CANTFORWARD); m_freem(m); return; } @@ -813,7 +700,7 @@ ip_input(struct mbuf *m) * Not for us; forward if possible and desirable. */ if (ipforwarding == 0) { - ipstat.ips_cantforward++; + IP_STATINC(IP_STAT_CANTFORWARD); m_freem(m); } else { /* @@ -824,64 +711,16 @@ ip_input(struct mbuf *m) */ if (downmatch) { icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0); - ipstat.ips_cantforward++; + IP_STATINC(IP_STAT_CANTFORWARD); return; } #ifdef IPSEC - if (ipsec4_in_reject(m, NULL)) { - ipsecstat.in_polvio++; + /* Perform IPsec, if any. */ + if (ipsec4_input(m, IP_FORWARDING | (ip_directedbcast ? + IP_ALLOWBROADCAST : 0)) != 0) { goto bad; } #endif -#ifdef FAST_IPSEC - mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); - s = splsoftnet(); - if (mtag != NULL) { - tdbi = (struct tdb_ident *)(mtag + 1); - sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); - } else { - sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, - IP_FORWARDING, &error); - } - if (sp == NULL) { /* NB: can happen if error */ - splx(s); - /*XXX error stat???*/ - DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/ - goto bad; - } - - /* - * Check security policy against packet attributes. - */ - error = ipsec_in_reject(sp, m); - KEY_FREESP(&sp); - splx(s); - if (error) { - ipstat.ips_cantforward++; - goto bad; - } - - /* - * Peek at the outbound SP for this packet to determine if - * it's a Fast Forward candidate. - */ - mtag = m_tag_find(m, PACKET_TAG_IPSEC_PENDING_TDB, NULL); - if (mtag != NULL) - m->m_flags &= ~M_CANFASTFWD; - else { - s = splsoftnet(); - sp = ipsec4_checkpolicy(m, IPSEC_DIR_OUTBOUND, - (IP_FORWARDING | - (ip_directedbcast ? IP_ALLOWBROADCAST : 0)), - &error, NULL); - if (sp != NULL) { - m->m_flags &= ~M_CANFASTFWD; - KEY_FREESP(&sp); - } - splx(s); - } -#endif /* FAST_IPSEC */ - ip_forward(m, srcrt); } return; @@ -889,137 +728,39 @@ ip_input(struct mbuf *m) ours: /* * If offset or IP_MF are set, must reassemble. - * Otherwise, nothing need be done. - * (We could look in the reassembly queue to see - * if the packet was previously fragmented, - * but it's not worth the time; just let them time out.) */ if (ip->ip_off & ~htons(IP_DF|IP_RF)) { - /* - * Look for queue of fragments - * of this datagram. + * Pass to IP reassembly mechanism. */ - IPQ_LOCK(); - hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); - /* XXX LIST_FOREACH(fp, &ipq[hash], ipq_q) */ - for (fp = LIST_FIRST(&ipq[hash]); fp != NULL; - fp = LIST_NEXT(fp, ipq_q)) { - if (ip->ip_id == fp->ipq_id && - in_hosteq(ip->ip_src, fp->ipq_src) && - in_hosteq(ip->ip_dst, fp->ipq_dst) && - ip->ip_p == fp->ipq_p) - goto found; - + if (ip_reass_packet(&m, ip) != 0) { + /* Failed; invalid fragment(s) or packet. */ + goto bad; } - fp = 0; -found: - - /* - * Adjust ip_len to not reflect header, - * set ipqe_mff if more fragments are expected, - * convert offset of this to bytes. - */ - ip->ip_len = htons(ntohs(ip->ip_len) - hlen); - mff = (ip->ip_off & htons(IP_MF)) != 0; - if (mff) { - /* - * Make sure that fragments have a data length - * that's a non-zero multiple of 8 bytes. 
- */ - if (ntohs(ip->ip_len) == 0 || - (ntohs(ip->ip_len) & 0x7) != 0) { - ipstat.ips_badfrags++; - IPQ_UNLOCK(); - goto bad; - } + if (m == NULL) { + /* More fragments should come; silently return. */ + return; } - ip->ip_off = htons((ntohs(ip->ip_off) & IP_OFFMASK) << 3); - /* - * If datagram marked as having more fragments - * or if this is not the first fragment, - * attempt reassembly; if it succeeds, proceed. + * Reassembly is done, we have the final packet. + * Updated cached data in local variable(s). */ - if (mff || ip->ip_off != htons(0)) { - ipstat.ips_fragments++; - s = splvm(); - ipqe = pool_get(&ipqent_pool, PR_NOWAIT); - splx(s); - if (ipqe == NULL) { - ipstat.ips_rcvmemdrop++; - IPQ_UNLOCK(); - goto bad; - } - ipqe->ipqe_mff = mff; - ipqe->ipqe_m = m; - ipqe->ipqe_ip = ip; - m = ip_reass(ipqe, fp, &ipq[hash]); - if (m == 0) { - IPQ_UNLOCK(); - return; - } - ipstat.ips_reassembled++; - ip = mtod(m, struct ip *); - hlen = ip->ip_hl << 2; - ip->ip_len = htons(ntohs(ip->ip_len) + hlen); - } else - if (fp) - ip_freef(fp); - IPQ_UNLOCK(); + ip = mtod(m, struct ip *); + hlen = ip->ip_hl << 2; } -#if defined(IPSEC) - /* - * enforce IPsec policy checking if we are seeing last header. - * note that we do not visit this with protocols with pcb layer - * code - like udp/tcp/raw ip. - */ - if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0 && - ipsec4_in_reject(m, NULL)) { - ipsecstat.in_polvio++; - goto bad; - } -#endif -#ifdef FAST_IPSEC +#ifdef IPSEC /* - * enforce IPsec policy checking if we are seeing last header. - * note that we do not visit this with protocols with pcb layer - * code - like udp/tcp/raw ip. + * Enforce IPsec policy checking if we are seeing last header. + * Note that we do not visit this with protocols with PCB layer + * code - like UDP/TCP/raw IP. */ if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0) { - /* - * Check if the packet has already had IPsec processing - * done. If so, then just pass it along. This tag gets - * set during AH, ESP, etc. input handling, before the - * packet is returned to the ip input queue for delivery. - */ - mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); - s = splsoftnet(); - if (mtag != NULL) { - tdbi = (struct tdb_ident *)(mtag + 1); - sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND); - } else { - sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND, - IP_FORWARDING, &error); - } - if (sp != NULL) { - /* - * Check security policy against packet attributes. - */ - error = ipsec_in_reject(sp, m); - KEY_FREESP(&sp); - } else { - /* XXX error stat??? */ - error = EINVAL; -DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/ + if (ipsec4_input(m, 0) != 0) { goto bad; } - splx(s); - if (error) - goto bad; } -#endif /* FAST_IPSEC */ +#endif /* * Switch out to protocol's input routine. @@ -1028,7 +769,7 @@ DPRINTF(("ip_input: no SP, packet discar if (ia && ip) ia->ia_ifa.ifa_data.ifad_inbytes += ntohs(ip->ip_len); #endif - ipstat.ips_delivered++; + IP_STATINC(IP_STAT_DELIVERED); { int off = hlen, nh = ip->ip_p; @@ -1040,400 +781,36 @@ bad: return; badcsum: - ipstat.ips_badsum++; + IP_STATINC(IP_STAT_BADSUM); m_freem(m); } /* - * Take incoming datagram fragment and try to - * reassemble it into whole datagram. If a chain for - * reassembly of this datagram already exists, then it - * is given as fp; otherwise have to make a chain. 
- */ -struct mbuf * -ip_reass(struct ipqent *ipqe, struct ipq *fp, struct ipqhead *ipqhead) -{ - struct mbuf *m = ipqe->ipqe_m; - struct ipqent *nq, *p, *q; - struct ip *ip; - struct mbuf *t; - int hlen = ipqe->ipqe_ip->ip_hl << 2; - int i, next, s; - - IPQ_LOCK_CHECK(); - - /* - * Presence of header sizes in mbufs - * would confuse code below. - */ - m->m_data += hlen; - m->m_len -= hlen; - -#ifdef notyet - /* make sure fragment limit is up-to-date */ - CHECK_NMBCLUSTER_PARAMS(); - - /* If we have too many fragments, drop the older half. */ - if (ip_nfrags >= ip_maxfrags) - ip_reass_drophalf(void); -#endif - - /* - * We are about to add a fragment; increment frag count. - */ - ip_nfrags++; - - /* - * If first fragment to arrive, create a reassembly queue. - */ - if (fp == 0) { - /* - * Enforce upper bound on number of fragmented packets - * for which we attempt reassembly; - * If maxfrag is 0, never accept fragments. - * If maxfrag is -1, accept all fragments without limitation. - */ - if (ip_maxfragpackets < 0) - ; - else if (ip_nfragpackets >= ip_maxfragpackets) - goto dropfrag; - ip_nfragpackets++; - MALLOC(fp, struct ipq *, sizeof (struct ipq), - M_FTABLE, M_NOWAIT); - if (fp == NULL) - goto dropfrag; - LIST_INSERT_HEAD(ipqhead, fp, ipq_q); - fp->ipq_nfrags = 1; - fp->ipq_ttl = IPFRAGTTL; - fp->ipq_p = ipqe->ipqe_ip->ip_p; - fp->ipq_id = ipqe->ipqe_ip->ip_id; - TAILQ_INIT(&fp->ipq_fragq); - fp->ipq_src = ipqe->ipqe_ip->ip_src; - fp->ipq_dst = ipqe->ipqe_ip->ip_dst; - p = NULL; - goto insert; - } else { - fp->ipq_nfrags++; - } - - /* - * Find a segment which begins after this one does. - */ - for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; - p = q, q = TAILQ_NEXT(q, ipqe_q)) - if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off)) - break; - - /* - * If there is a preceding segment, it may provide some of - * our data already. If so, drop the data from the incoming - * segment. If it provides all of our data, drop us. - */ - if (p != NULL) { - i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) - - ntohs(ipqe->ipqe_ip->ip_off); - if (i > 0) { - if (i >= ntohs(ipqe->ipqe_ip->ip_len)) - goto dropfrag; - m_adj(ipqe->ipqe_m, i); - ipqe->ipqe_ip->ip_off = - htons(ntohs(ipqe->ipqe_ip->ip_off) + i); - ipqe->ipqe_ip->ip_len = - htons(ntohs(ipqe->ipqe_ip->ip_len) - i); - } - } - - /* - * While we overlap succeeding segments trim them or, - * if they are completely covered, dequeue them. - */ - for (; q != NULL && - ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) > - ntohs(q->ipqe_ip->ip_off); q = nq) { - i = (ntohs(ipqe->ipqe_ip->ip_off) + - ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off); - if (i < ntohs(q->ipqe_ip->ip_len)) { - q->ipqe_ip->ip_len = - htons(ntohs(q->ipqe_ip->ip_len) - i); - q->ipqe_ip->ip_off = - htons(ntohs(q->ipqe_ip->ip_off) + i); - m_adj(q->ipqe_m, i); - break; - } - nq = TAILQ_NEXT(q, ipqe_q); - m_freem(q->ipqe_m); - TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q); - s = splvm(); - pool_put(&ipqent_pool, q); - splx(s); - fp->ipq_nfrags--; - ip_nfrags--; - } - -insert: - /* - * Stick new segment in its place; - * check for complete reassembly. 
- */ - if (p == NULL) { - TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q); - } else { - TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q); - } - next = 0; - for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; - p = q, q = TAILQ_NEXT(q, ipqe_q)) { - if (ntohs(q->ipqe_ip->ip_off) != next) - return (0); - next += ntohs(q->ipqe_ip->ip_len); - } - if (p->ipqe_mff) - return (0); - - /* - * Reassembly is complete. Check for a bogus message size and - * concatenate fragments. - */ - q = TAILQ_FIRST(&fp->ipq_fragq); - ip = q->ipqe_ip; - if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) { - ipstat.ips_toolong++; - ip_freef(fp); - return (0); - } - m = q->ipqe_m; - t = m->m_next; - m->m_next = 0; - m_cat(m, t); - nq = TAILQ_NEXT(q, ipqe_q); - s = splvm(); - pool_put(&ipqent_pool, q); - splx(s); - for (q = nq; q != NULL; q = nq) { - t = q->ipqe_m; - nq = TAILQ_NEXT(q, ipqe_q); - s = splvm(); - pool_put(&ipqent_pool, q); - splx(s); - m_cat(m, t); - } - ip_nfrags -= fp->ipq_nfrags; - - /* - * Create header for new ip packet by - * modifying header of first packet; - * dequeue and discard fragment reassembly header. - * Make header visible. - */ - ip->ip_len = htons(next); - ip->ip_src = fp->ipq_src; - ip->ip_dst = fp->ipq_dst; - LIST_REMOVE(fp, ipq_q); - FREE(fp, M_FTABLE); - ip_nfragpackets--; - m->m_len += (ip->ip_hl << 2); - m->m_data -= (ip->ip_hl << 2); - /* some debugging cruft by sklower, below, will go away soon */ - if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */ - int plen = 0; - for (t = m; t; t = t->m_next) - plen += t->m_len; - m->m_pkthdr.len = plen; - m->m_pkthdr.csum_flags = 0; - } - return (m); - -dropfrag: - if (fp != 0) - fp->ipq_nfrags--; - ip_nfrags--; - ipstat.ips_fragdropped++; - m_freem(m); - s = splvm(); - pool_put(&ipqent_pool, ipqe); - splx(s); - return (0); -} - -/* - * Free a fragment reassembly header and all - * associated datagrams. - */ -void -ip_freef(struct ipq *fp) -{ - struct ipqent *q, *p; - u_int nfrags = 0; - int s; - - IPQ_LOCK_CHECK(); - - for (q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; q = p) { - p = TAILQ_NEXT(q, ipqe_q); - m_freem(q->ipqe_m); - nfrags++; - TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q); - s = splvm(); - pool_put(&ipqent_pool, q); - splx(s); - } - - if (nfrags != fp->ipq_nfrags) - printf("ip_freef: nfrags %d != %d\n", fp->ipq_nfrags, nfrags); - ip_nfrags -= nfrags; - LIST_REMOVE(fp, ipq_q); - FREE(fp, M_FTABLE); - ip_nfragpackets--; -} - -/* - * IP reassembly TTL machinery for multiplicative drop. - */ -static u_int fragttl_histo[(IPFRAGTTL+1)]; - - -/* - * Decrement TTL of all reasembly queue entries by `ticks'. - * Count number of distinct fragments (as opposed to partial, fragmented - * datagrams) in the reassembly queue. While we traverse the entire - * reassembly queue, compute and return the median TTL over all fragments. - */ -static u_int -ip_reass_ttl_decr(u_int ticks) -{ - u_int nfrags, median, dropfraction, keepfraction; - struct ipq *fp, *nfp; - int i; - - nfrags = 0; - memset(fragttl_histo, 0, sizeof fragttl_histo); - - for (i = 0; i < IPREASS_NHASH; i++) { - for (fp = LIST_FIRST(&ipq[i]); fp != NULL; fp = nfp) { - fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ? - 0 : fp->ipq_ttl - ticks); - nfp = LIST_NEXT(fp, ipq_q); - if (fp->ipq_ttl == 0) { - ipstat.ips_fragtimeout++; - ip_freef(fp); - } else { - nfrags += fp->ipq_nfrags; - fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags; - } - } - } - - KASSERT(ip_nfrags == nfrags); - - /* Find median (or other drop fraction) in histogram. 
*/ - dropfraction = (ip_nfrags / 2); - keepfraction = ip_nfrags - dropfraction; - for (i = IPFRAGTTL, median = 0; i >= 0; i--) { - median += fragttl_histo[i]; - if (median >= keepfraction) - break; - } - - /* Return TTL of median (or other fraction). */ - return (u_int)i; -} - -void -ip_reass_drophalf(void) -{ - - u_int median_ticks; - /* - * Compute median TTL of all fragments, and count frags - * with that TTL or lower (roughly half of all fragments). - */ - median_ticks = ip_reass_ttl_decr(0); - - /* Drop half. */ - median_ticks = ip_reass_ttl_decr(median_ticks); - -} - -/* - * IP timer processing; - * if a timer expires on a reassembly - * queue, discard it. + * IP timer processing. */ void ip_slowtimo(void) { - static u_int dropscanidx = 0; - u_int i; - u_int median_ttl; - int s = splsoftnet(); - - IPQ_LOCK(); - - /* Age TTL of all fragments by 1 tick .*/ - median_ttl = ip_reass_ttl_decr(1); - /* make sure fragment limit is up-to-date */ - CHECK_NMBCLUSTER_PARAMS(); + mutex_enter(softnet_lock); + KERNEL_LOCK(1, NULL); - /* If we have too many fragments, drop the older half. */ - if (ip_nfrags > ip_maxfrags) - ip_reass_ttl_decr(median_ttl); - - /* - * If we are over the maximum number of fragmented packets - * (due to the limit being lowered), drain off - * enough to get down to the new limit. Start draining - * from the reassembly hashqueue most recently drained. - */ - if (ip_maxfragpackets < 0) - ; - else { - int wrapped = 0; + ip_reass_slowtimo(); - i = dropscanidx; - while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) { - while (LIST_FIRST(&ipq[i]) != NULL) - ip_freef(LIST_FIRST(&ipq[i])); - if (++i >= IPREASS_NHASH) { - i = 0; - } - /* - * Dont scan forever even if fragment counters are - * wrong: stop after scanning entire reassembly queue. - */ - if (i == dropscanidx) - wrapped = 1; - } - dropscanidx = i; - } - IPQ_UNLOCK(); -#ifdef GATEWAY - ipflow_slowtimo(); -#endif - splx(s); + KERNEL_UNLOCK_ONE(NULL); + mutex_exit(softnet_lock); } /* - * Drain off all datagram fragments. + * IP drain processing. */ void ip_drain(void) { - /* - * We may be called from a device's interrupt context. If - * the ipq is already busy, just bail out now. - */ - if (ipq_lock_try() == 0) - return; - - /* - * Drop half the total fragments now. If more mbufs are needed, - * we will be called again soon. 
- */ - ip_reass_drophalf(); - - IPQ_UNLOCK(); + KERNEL_LOCK(1, NULL); + ip_reass_drain(); + KERNEL_UNLOCK_ONE(NULL); } /* @@ -1528,7 +905,7 @@ ip_dooptions(struct mbuf *m) /* * locate outgoing interface */ - bcopy((caddr_t)(cp + off), (caddr_t)&ipaddr.sin_addr, + memcpy((void *)&ipaddr.sin_addr, (void *)(cp + off), sizeof(ipaddr.sin_addr)); if (opt == IPOPT_SSRR) ia = ifatoia(ifa_ifwithladdr(sintosa(&ipaddr))); @@ -1540,8 +917,8 @@ ip_dooptions(struct mbuf *m) goto bad; } ip->ip_dst = ipaddr.sin_addr; - bcopy((caddr_t)&ia->ia_addr.sin_addr, - (caddr_t)(cp + off), sizeof(struct in_addr)); + bcopy((void *)&ia->ia_addr.sin_addr, + (void *)(cp + off), sizeof(struct in_addr)); cp[IPOPT_OFFSET] += sizeof(struct in_addr); /* * Let ip_intr's mcast routing check handle mcast pkts @@ -1564,7 +941,7 @@ ip_dooptions(struct mbuf *m) off--; /* 0 origin */ if ((off + sizeof(struct in_addr)) > optlen) break; - bcopy((caddr_t)(&ip->ip_dst), (caddr_t)&ipaddr.sin_addr, + memcpy((void *)&ipaddr.sin_addr, (void *)(&ip->ip_dst), sizeof(ipaddr.sin_addr)); /* * locate outgoing interface; if we're the destination, @@ -1577,8 +954,8 @@ ip_dooptions(struct mbuf *m) code = ICMP_UNREACH_HOST; goto bad; } - bcopy((caddr_t)&ia->ia_addr.sin_addr, - (caddr_t)(cp + off), sizeof(struct in_addr)); + bcopy((void *)&ia->ia_addr.sin_addr, + (void *)(cp + off), sizeof(struct in_addr)); cp[IPOPT_OFFSET] += sizeof(struct in_addr); break; @@ -1631,7 +1008,7 @@ ip_dooptions(struct mbuf *m) (u_char *)ip; goto bad; } - bcopy(cp0, &ipaddr.sin_addr, + memcpy(&ipaddr.sin_addr, cp0, sizeof(struct in_addr)); if (ifatoia(ifa_ifwithaddr(sintosa(&ipaddr))) == NULL) @@ -1647,7 +1024,7 @@ ip_dooptions(struct mbuf *m) } ntime = iptime(); cp0 = (u_char *) &ntime; /* XXX grumble, GCC... */ - bcopy(cp0, (caddr_t)cp + ipt->ipt_ptr - 1, + memmove((char *)cp + ipt->ipt_ptr - 1, cp0, sizeof(n_time)); ipt->ipt_ptr += sizeof(n_time); } @@ -1664,7 +1041,7 @@ ip_dooptions(struct mbuf *m) return (0); bad: icmp_error(m, type, code, 0, 0); - ipstat.ips_badoptions++; + IP_STATINC(IP_STAT_BADOPTIONS); return (1); } @@ -1675,24 +1052,18 @@ bad: struct in_ifaddr * ip_rtaddr(struct in_addr dst) { - struct sockaddr_in *sin; + struct rtentry *rt; + union { + struct sockaddr dst; + struct sockaddr_in dst4; + } u; - sin = satosin(&ipforward_rt.ro_dst); + sockaddr_in_init(&u.dst4, &dst, 0); - if (ipforward_rt.ro_rt == 0 || !in_hosteq(dst, sin->sin_addr)) { - if (ipforward_rt.ro_rt) { - RTFREE(ipforward_rt.ro_rt); - ipforward_rt.ro_rt = 0; - } - sin->sin_family = AF_INET; - sin->sin_len = sizeof(*sin); - sin->sin_addr = dst; - - rtalloc(&ipforward_rt); - } - if (ipforward_rt.ro_rt == 0) - return ((struct in_ifaddr *)0); - return (ifatoia(ipforward_rt.ro_rt->rt_ifa)); + if ((rt = rtcache_lookup(&ipforward_rt, &u.dst)) == NULL) + return NULL; + + return ifatoia(rt->rt_ifa); } /* @@ -1711,7 +1082,7 @@ save_rte(u_char *option, struct in_addr #endif /* 0 */ if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) return; - bcopy((caddr_t)option, (caddr_t)ip_srcrt.srcopt, olen); + memcpy((void *)ip_srcrt.srcopt, (void *)option, olen); ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); ip_srcrt.dst = dst; } @@ -1728,10 +1099,10 @@ ip_srcroute(void) struct mbuf *m; if (ip_nhops == 0) - return ((struct mbuf *)0); + return NULL; m = m_get(M_DONTWAIT, MT_SOOPTS); if (m == 0) - return ((struct mbuf *)0); + return NULL; MCLAIM(m, &inetdomain.dom_mowner); #define OPTSIZ (sizeof(ip_srcrt.nop) + sizeof(ip_srcrt.srcopt)) @@ -1759,9 +1130,9 @@ ip_srcroute(void) */ ip_srcrt.nop = 
IPOPT_NOP; ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF; - bcopy((caddr_t)&ip_srcrt.nop, - mtod(m, caddr_t) + sizeof(struct in_addr), OPTSIZ); - q = (struct in_addr *)(mtod(m, caddr_t) + + memmove(mtod(m, char *) + sizeof(struct in_addr), &ip_srcrt.nop, + OPTSIZ); + q = (struct in_addr *)(mtod(m, char *) + sizeof(struct in_addr) + OPTSIZ); #undef OPTSIZ /* @@ -1786,40 +1157,32 @@ ip_srcroute(void) return (m); } -/* - * Strip out IP options, at higher - * level protocol in the kernel. - * Second argument is buffer to which options - * will be moved, and return value is their length. - * XXX should be deleted; last arg currently ignored. - */ +const int inetctlerrmap[PRC_NCMDS] = { + [PRC_MSGSIZE] = EMSGSIZE, + [PRC_HOSTDEAD] = EHOSTDOWN, + [PRC_HOSTUNREACH] = EHOSTUNREACH, + [PRC_UNREACH_NET] = EHOSTUNREACH, + [PRC_UNREACH_HOST] = EHOSTUNREACH, + [PRC_UNREACH_PROTOCOL] = ECONNREFUSED, + [PRC_UNREACH_PORT] = ECONNREFUSED, + [PRC_UNREACH_SRCFAIL] = EHOSTUNREACH, + [PRC_PARAMPROB] = ENOPROTOOPT, +}; + void -ip_stripoptions(struct mbuf *m, struct mbuf *mopt) +ip_fasttimo(void) { - int i; - struct ip *ip = mtod(m, struct ip *); - caddr_t opts; - int olen; - - olen = (ip->ip_hl << 2) - sizeof (struct ip); - opts = (caddr_t)(ip + 1); - i = m->m_len - (sizeof (struct ip) + olen); - bcopy(opts + olen, opts, (unsigned)i); - m->m_len -= olen; - if (m->m_flags & M_PKTHDR) - m->m_pkthdr.len -= olen; - ip->ip_len = htons(ntohs(ip->ip_len) - olen); - ip->ip_hl = sizeof (struct ip) >> 2; + if (ip_drainwanted) { + ip_drain(); + ip_drainwanted = 0; + } } -const int inetctlerrmap[PRC_NCMDS] = { - 0, 0, 0, 0, - 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, - EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, - EMSGSIZE, EHOSTUNREACH, 0, 0, - 0, 0, 0, 0, - ENOPROTOOPT -}; +void +ip_drainstub(void) +{ + ip_drainwanted = 1; +} /* * Forward a packet. If some error occurs return the sender @@ -1839,11 +1202,14 @@ void ip_forward(struct mbuf *m, int srcrt) { struct ip *ip = mtod(m, struct ip *); - struct sockaddr_in *sin; struct rtentry *rt; int error, type = 0, code = 0, destmtu = 0; struct mbuf *mcopy; n_long dest; + union { + struct sockaddr dst; + struct sockaddr_in dst4; + } u; /* * We are now in the output path. 
@@ -1863,7 +1229,7 @@ ip_forward(struct mbuf *m, int srcrt) } #endif if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) { - ipstat.ips_cantforward++; + IP_STATINC(IP_STAT_CANTFORWARD); m_freem(m); return; } @@ -1872,23 +1238,10 @@ ip_forward(struct mbuf *m, int srcrt) return; } - sin = satosin(&ipforward_rt.ro_dst); - if ((rt = ipforward_rt.ro_rt) == 0 || - !in_hosteq(ip->ip_dst, sin->sin_addr)) { - if (ipforward_rt.ro_rt) { - RTFREE(ipforward_rt.ro_rt); - ipforward_rt.ro_rt = 0; - } - sin->sin_family = AF_INET; - sin->sin_len = sizeof(struct sockaddr_in); - sin->sin_addr = ip->ip_dst; - - rtalloc(&ipforward_rt); - if (ipforward_rt.ro_rt == 0) { - icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NET, dest, 0); - return; - } - rt = ipforward_rt.ro_rt; + sockaddr_in_init(&u.dst4, &ip->ip_dst, 0); + if ((rt = rtcache_lookup(&ipforward_rt, &u.dst)) == NULL) { + icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_NET, dest, 0); + return; } /* @@ -1912,7 +1265,7 @@ ip_forward(struct mbuf *m, int srcrt) */ if (rt->rt_ifp == m->m_pkthdr.rcvif && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && - !in_nullhost(satosin(rt_key(rt))->sin_addr) && + !in_nullhost(satocsin(rt_getkey(rt))->sin_addr) && ipsendredirects && !srcrt) { if (rt->rt_ifa && (ip->ip_src.s_addr & ifatoia(rt->rt_ifa)->ia_subnetmask) == @@ -1935,17 +1288,20 @@ ip_forward(struct mbuf *m, int srcrt) } } - error = ip_output(m, (struct mbuf *)0, &ipforward_rt, + error = ip_output(m, NULL, &ipforward_rt, (IP_FORWARDING | (ip_directedbcast ? IP_ALLOWBROADCAST : 0)), - (struct ip_moptions *)NULL, (struct socket *)NULL); + NULL, NULL); if (error) - ipstat.ips_cantforward++; + IP_STATINC(IP_STAT_CANTFORWARD); else { - ipstat.ips_forward++; - if (type) - ipstat.ips_redirectsent++; - else { + uint64_t *ips = IP_STAT_GETREF(); + ips[IP_STAT_FORWARD]++; + if (type) { + ips[IP_STAT_REDIRECTSENT]++; + IP_STAT_PUTREF(); + } else { + IP_STAT_PUTREF(); if (mcopy) { #ifdef GATEWAY if (mcopy->m_flags & M_CANFASTFWD) @@ -1977,60 +1333,13 @@ ip_forward(struct mbuf *m, int srcrt) case EMSGSIZE: type = ICMP_UNREACH; code = ICMP_UNREACH_NEEDFRAG; -#if !defined(IPSEC) && !defined(FAST_IPSEC) - if (ipforward_rt.ro_rt) - destmtu = ipforward_rt.ro_rt->rt_ifp->if_mtu; -#else - /* - * If the packet is routed over IPsec tunnel, tell the - * originator the tunnel MTU. - * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz - * XXX quickhack!!! - */ - if (ipforward_rt.ro_rt) { - struct secpolicy *sp; - int ipsecerror; - size_t ipsechdr; - struct route *ro; - - sp = ipsec4_getpolicybyaddr(mcopy, - IPSEC_DIR_OUTBOUND, IP_FORWARDING, - &ipsecerror); - - if (sp == NULL) - destmtu = ipforward_rt.ro_rt->rt_ifp->if_mtu; - else { - /* count IPsec header size */ - ipsechdr = ipsec4_hdrsiz(mcopy, - IPSEC_DIR_OUTBOUND, NULL); - /* - * find the correct route for outer IPv4 - * header, compute tunnel MTU. - */ - - if (sp->req != NULL - && sp->req->sav != NULL - && sp->req->sav->sah != NULL) { - ro = &sp->req->sav->sah->sa_route; - if (ro->ro_rt && ro->ro_rt->rt_ifp) { - destmtu = - ro->ro_rt->rt_rmx.rmx_mtu ? 
- ro->ro_rt->rt_rmx.rmx_mtu : - ro->ro_rt->rt_ifp->if_mtu; - destmtu -= ipsechdr; - } - } - -#ifdef IPSEC - key_freesp(sp); -#else - KEY_FREESP(&sp); + if ((rt = rtcache_validate(&ipforward_rt)) != NULL) + destmtu = rt->rt_ifp->if_mtu; +#ifdef IPSEC + (void)ipsec4_forward(mcopy, &destmtu); #endif - } - } -#endif /*IPSEC*/ - ipstat.ips_cantfrag++; + IP_STATINC(IP_STAT_CANTFRAG); break; case ENOBUFS: @@ -2058,21 +1367,51 @@ ip_savecontrol(struct inpcb *inp, struct struct mbuf *m) { - if (inp->inp_socket->so_options & SO_TIMESTAMP) { + if (inp->inp_socket->so_options & SO_TIMESTAMP +#ifdef SO_OTIMESTAMP + || inp->inp_socket->so_options & SO_OTIMESTAMP +#endif + ) { struct timeval tv; microtime(&tv); - *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv), +#ifdef SO_OTIMESTAMP + if (inp->inp_socket->so_options & SO_OTIMESTAMP) { + struct timeval50 tv50; + timeval_to_timeval50(&tv, &tv50); + *mp = sbcreatecontrol((void *) &tv50, sizeof(tv50), + SCM_OTIMESTAMP, SOL_SOCKET); + } else +#endif + *mp = sbcreatecontrol((void *) &tv, sizeof(tv), SCM_TIMESTAMP, SOL_SOCKET); if (*mp) mp = &(*mp)->m_next; } if (inp->inp_flags & INP_RECVDSTADDR) { - *mp = sbcreatecontrol((caddr_t) &ip->ip_dst, + *mp = sbcreatecontrol((void *) &ip->ip_dst, sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } + if (inp->inp_flags & INP_RECVPKTINFO) { + struct in_pktinfo ipi; + ipi.ipi_addr = ip->ip_src; + ipi.ipi_ifindex = m->m_pkthdr.rcvif->if_index; + *mp = sbcreatecontrol((void *) &ipi, + sizeof(ipi), IP_RECVPKTINFO, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } + if (inp->inp_flags & INP_PKTINFO) { + struct in_pktinfo ipi; + ipi.ipi_addr = ip->ip_dst; + ipi.ipi_ifindex = m->m_pkthdr.rcvif->if_index; + *mp = sbcreatecontrol((void *) &ipi, + sizeof(ipi), IP_PKTINFO, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } #ifdef notyet /* * XXX @@ -2082,14 +1421,14 @@ ip_savecontrol(struct inpcb *inp, struct */ /* options were tossed already */ if (inp->inp_flags & INP_RECVOPTS) { - *mp = sbcreatecontrol((caddr_t) opts_deleted_above, + *mp = sbcreatecontrol((void *) opts_deleted_above, sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } /* ip_srcroute doesn't do what we want here, need to fix */ if (inp->inp_flags & INP_RECVRETOPTS) { - *mp = sbcreatecontrol((caddr_t) ip_srcroute(), + *mp = sbcreatecontrol((void *) ip_srcroute(), sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; @@ -2098,13 +1437,18 @@ ip_savecontrol(struct inpcb *inp, struct if (inp->inp_flags & INP_RECVIF) { struct sockaddr_dl sdl; - sdl.sdl_len = offsetof(struct sockaddr_dl, sdl_data[0]); - sdl.sdl_family = AF_LINK; - sdl.sdl_index = m->m_pkthdr.rcvif ? - m->m_pkthdr.rcvif->if_index : 0; - sdl.sdl_nlen = sdl.sdl_alen = sdl.sdl_slen = 0; - *mp = sbcreatecontrol((caddr_t) &sdl, sdl.sdl_len, - IP_RECVIF, IPPROTO_IP); + sockaddr_dl_init(&sdl, sizeof(sdl), + (m->m_pkthdr.rcvif != NULL) + ? 
m->m_pkthdr.rcvif->if_index + : 0, + 0, NULL, 0, NULL, 0); + *mp = sbcreatecontrol(&sdl, sdl.sdl_len, IP_RECVIF, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } + if (inp->inp_flags & INP_RECVTTL) { + *mp = sbcreatecontrol((void *) &ip->ip_ttl, + sizeof(uint8_t), IP_RECVTTL, IPPROTO_IP); if (*mp) mp = &(*mp)->m_next; } @@ -2126,9 +1470,10 @@ sysctl_net_inet_ip_forwsrcrt(SYSCTLFN_AR if (error || newp == NULL) return (error); - if (kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FORWSRCRT, - 0, NULL, NULL, NULL)) - return (EPERM); + error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_FORWSRCRT, + 0, NULL, NULL, NULL); + if (error) + return (error); ip_forwsrcrt = tmp; @@ -2154,46 +1499,90 @@ sysctl_net_inet_ip_pmtudto(SYSCTLFN_ARGS if (tmp < 0) return (EINVAL); + mutex_enter(softnet_lock); + ip_mtudisc_timeout = tmp; rt_timer_queue_change(ip_mtudisc_timeout_q, ip_mtudisc_timeout); + mutex_exit(softnet_lock); + return (0); } #ifdef GATEWAY /* - * sysctl helper routine for net.inet.ip.maxflows. apparently if - * maxflows is even looked up, we "reap flows". + * sysctl helper routine for net.inet.ip.maxflows. */ static int sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS) { - int s; + int error; + + error = sysctl_lookup(SYSCTLFN_CALL(rnode)); + if (error || newp == NULL) + return (error); + + mutex_enter(softnet_lock); + KERNEL_LOCK(1, NULL); + + ipflow_reap(false); - s = sysctl_lookup(SYSCTLFN_CALL(rnode)); - if (s) - return (s); - - s = splsoftnet(); - ipflow_reap(0); - splx(s); + KERNEL_UNLOCK_ONE(NULL); + mutex_exit(softnet_lock); return (0); } + +static int +sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS) +{ + int error, tmp; + struct sysctlnode node; + + node = *rnode; + tmp = ip_hashsize; + node.sysctl_data = &tmp; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); + if (error || newp == NULL) + return (error); + + if ((tmp & (tmp - 1)) == 0 && tmp != 0) { + /* + * Can only fail due to malloc() + */ + mutex_enter(softnet_lock); + KERNEL_LOCK(1, NULL); + + error = ipflow_invalidate_all(tmp); + + KERNEL_UNLOCK_ONE(NULL); + mutex_exit(softnet_lock); + + } else { + /* + * EINVAL if not a power of 2 + */ + error = EINVAL; + } + + return error; +} #endif /* GATEWAY */ +static int +sysctl_net_inet_ip_stats(SYSCTLFN_ARGS) +{ -SYSCTL_SETUP(sysctl_net_inet_ip_setup, "sysctl net.inet.ip subtree setup") + return (NETSTAT_SYSCTL(ipstat_percpu, IP_NSTATS)); +} + +static void +sysctl_net_inet_ip_setup(struct sysctllog **clog) { extern int subnetsarelocal, hostzeroisbroadcast; sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, - CTLTYPE_NODE, "net", NULL, - NULL, 0, NULL, 0, - CTL_NET, CTL_EOL); - sysctl_createv(clog, 0, NULL, NULL, - CTLFLAG_PERMANENT, CTLTYPE_NODE, "inet", SYSCTL_DESCR("PF_INET related settings"), NULL, 0, NULL, 0, @@ -2290,7 +1679,7 @@ SYSCTL_SETUP(sysctl_net_inet_ip_setup, " CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "mtudisctimeout", SYSCTL_DESCR("Lifetime of a Path MTU Discovered route"), - sysctl_net_inet_ip_pmtudto, 0, &ip_mtudisc_timeout, 0, + sysctl_net_inet_ip_pmtudto, 0, (void *)&ip_mtudisc_timeout, 0, CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MTUDISCTIMEOUT, CTL_EOL); #ifdef GATEWAY @@ -2301,6 +1690,13 @@ SYSCTL_SETUP(sysctl_net_inet_ip_setup, " sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0, CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MAXFLOWS, CTL_EOL); + sysctl_createv(clog, 0, NULL, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, + CTLTYPE_INT, "hashsize", + SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"), + sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0, + CTL_NET, 
PF_INET, IPPROTO_IP, + CTL_CREATE, CTL_EOL); #endif /* GATEWAY */ sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, @@ -2336,14 +1732,6 @@ SYSCTL_SETUP(sysctl_net_inet_ip_setup, " CTL_NET, PF_INET, IPPROTO_IP, IPCTL_LOWPORTMAX, CTL_EOL); #endif /* IPNOPRIVPORTS */ - sysctl_createv(clog, 0, NULL, NULL, - CTLFLAG_PERMANENT|CTLFLAG_READWRITE, - CTLTYPE_INT, "maxfragpackets", - SYSCTL_DESCR("Maximum number of fragments to retain for " - "possible reassembly"), - NULL, 0, &ip_maxfragpackets, 0, - CTL_NET, PF_INET, IPPROTO_IP, - IPCTL_MAXFRAGPACKETS, CTL_EOL); #if NGRE > 0 sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, @@ -2379,7 +1767,42 @@ SYSCTL_SETUP(sysctl_net_inet_ip_setup, " CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "stats", SYSCTL_DESCR("IP statistics"), - NULL, 0, &ipstat, sizeof(ipstat), + sysctl_net_inet_ip_stats, 0, NULL, 0, CTL_NET, PF_INET, IPPROTO_IP, IPCTL_STATS, CTL_EOL); + + /* anonportalgo RFC6056 subtree */ + const struct sysctlnode *portalgo_node; + sysctl_createv(clog, 0, NULL, &portalgo_node, + CTLFLAG_PERMANENT, + CTLTYPE_NODE, "anonportalgo", + SYSCTL_DESCR("Anonymous Port Algorithm Selection (RFC 6056)"), + NULL, 0, NULL, 0, + CTL_NET, PF_INET, IPPROTO_IP, CTL_CREATE, CTL_EOL); + sysctl_createv(clog, 0, &portalgo_node, NULL, + CTLFLAG_PERMANENT, + CTLTYPE_STRING, "available", + SYSCTL_DESCR("available algorithms"), + sysctl_portalgo_available, 0, NULL, PORTALGO_MAXLEN, + CTL_CREATE, CTL_EOL); + sysctl_createv(clog, 0, &portalgo_node, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, + CTLTYPE_STRING, "selected", + SYSCTL_DESCR("selected algorithm"), + sysctl_portalgo_selected4, 0, NULL, PORTALGO_MAXLEN, + CTL_CREATE, CTL_EOL); + sysctl_createv(clog, 0, &portalgo_node, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, + CTLTYPE_STRUCT, "reserve", + SYSCTL_DESCR("bitmap of reserved ports"), + sysctl_portalgo_reserve4, 0, NULL, 0, + CTL_CREATE, CTL_EOL); +} + +void +ip_statinc(u_int stat) +{ + + KASSERT(stat < IP_NSTATS); + IP_STATINC(stat); }
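
The most pervasive mechanical change in this revision is the retirement of the single global struct ipstat (the ipstat.ips_* counters) in favour of the per-CPU ipstat_percpu array, incremented through IP_STATINC() and IP_STAT_GETREF()/IP_STAT_PUTREF() and folded together only when the new sysctl_net_inet_ip_stats() handler is consulted. The listing below is a minimal userland sketch of that pattern, assuming illustrative names (stat_inc, stat_collect) and fixed NCPU/IP_NSTATS sizes; it is not the NetBSD implementation, which allocates the storage with percpu_alloc(sizeof(uint64_t) * IP_NSTATS) in ip_init() as shown in the diff above.

/*
 * Sketch of per-CPU statistics counters (illustrative names only).
 * Hot path: each CPU increments its own 64-bit slot, no lock, no
 * shared cache line.  Slow path: fold all CPUs' slots when reading.
 */
#include <stdint.h>
#include <stdio.h>

#define NCPU		4	/* illustrative; the kernel discovers this at boot */
#define IP_NSTATS	64	/* illustrative; one slot per ips_* counter */

static uint64_t stats_percpu[NCPU][IP_NSTATS];

static inline void
stat_inc(unsigned cpu, unsigned stat)
{
	/* Only the owning CPU touches this row, so a plain add suffices. */
	stats_percpu[cpu][stat]++;
}

static void
stat_collect(uint64_t out[IP_NSTATS])
{
	/* Read path (e.g. a sysctl handler): sum every CPU's counters. */
	for (unsigned s = 0; s < IP_NSTATS; s++) {
		out[s] = 0;
		for (unsigned c = 0; c < NCPU; c++)
			out[s] += stats_percpu[c][s];
	}
}

int
main(void)
{
	uint64_t totals[IP_NSTATS];

	/* Pretend CPU 0 delivered two packets and CPU 1 forwarded one. */
	stat_inc(0, 0);
	stat_inc(0, 0);
	stat_inc(1, 1);

	stat_collect(totals);
	printf("stat0=%llu stat1=%llu\n",
	    (unsigned long long)totals[0], (unsigned long long)totals[1]);
	return 0;
}

Keeping a private counter array per CPU makes the increment in the packet input path lock-free and avoids cache-line bouncing between CPUs; the cost is a fold over all CPUs on the rarely taken read path, which is an acceptable trade for statistics reporting.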