1 /*
2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Jeffrey M. Hsu.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1988, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
62 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
63 * $FreeBSD: src/sys/netinet/ip_input.c,v 1.130.2.52 2003/03/07 07:01:28 silby Exp $
66 #define _IP_VHL
68 #include "opt_bootp.h"
69 #include "opt_ipdn.h"
70 #include "opt_ipdivert.h"
71 #include "opt_ipstealth.h"
72 #include "opt_ipsec.h"
73 #include "opt_rss.h"
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/mpipe.h>
80 #include <sys/domain.h>
81 #include <sys/protosw.h>
82 #include <sys/socket.h>
83 #include <sys/time.h>
84 #include <sys/globaldata.h>
85 #include <sys/thread.h>
86 #include <sys/kernel.h>
87 #include <sys/syslog.h>
88 #include <sys/sysctl.h>
89 #include <sys/in_cksum.h>
90 #include <sys/lock.h>
92 #include <sys/mplock2.h>
94 #include <machine/stdarg.h>
96 #include <net/if.h>
97 #include <net/if_types.h>
98 #include <net/if_var.h>
99 #include <net/if_dl.h>
100 #include <net/pfil.h>
101 #include <net/route.h>
102 #include <net/netisr2.h>
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #include <netinet/in_pcb.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/ip_icmp.h>
111 #include <netinet/ip_divert.h>
112 #include <netinet/ip_flow.h>
114 #include <sys/thread2.h>
115 #include <sys/msgport2.h>
116 #include <net/netmsg2.h>
118 #include <sys/socketvar.h>
120 #include <net/ipfw/ip_fw.h>
121 #include <net/dummynet/ip_dummynet.h>
123 #ifdef IPSEC
124 #include <netinet6/ipsec.h>
125 #include <netproto/key/key.h>
126 #endif
128 #ifdef FAST_IPSEC
129 #include <netproto/ipsec/ipsec.h>
130 #include <netproto/ipsec/key.h>
131 #endif
133 int rsvp_on = 0;
134 static int ip_rsvp_on;
135 struct socket *ip_rsvpd;
137 int ipforwarding = 0;
138 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
139 &ipforwarding, 0, "Enable IP forwarding between interfaces");
141 static int ipsendredirects = 1; /* XXX */
142 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
143 &ipsendredirects, 0, "Enable sending IP redirects");
145 int ip_defttl = IPDEFTTL;
146 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
147 &ip_defttl, 0, "Maximum TTL on IP packets");
149 static int ip_dosourceroute = 0;
150 SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW,
151 &ip_dosourceroute, 0, "Enable forwarding source routed IP packets");
153 static int ip_acceptsourceroute = 0;
154 SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
155 CTLFLAG_RW, &ip_acceptsourceroute, 0,
156 "Enable accepting source routed IP packets");
158 static int ip_keepfaith = 0;
159 SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
160 &ip_keepfaith, 0,
161 "Enable packet capture for FAITH IPv4->IPv6 translator daemon");
163 static int maxnipq;
164 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW,
165 &maxnipq, 0,
166 "Maximum number of IPv4 fragment reassembly queue entries");
168 static int maxfragsperpacket;
169 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
170 &maxfragsperpacket, 0,
171 "Maximum number of IPv4 fragments allowed per packet");
173 static int ip_sendsourcequench = 0;
174 SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
175 &ip_sendsourcequench, 0,
176 "Enable the transmission of source quench packets");
178 int ip_do_randomid = 1;
179 SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
180 &ip_do_randomid, 0,
181 "Assign random ip_id values");
183 * XXX - Setting ip_checkinterface mostly implements the receive side of
184 * the Strong ES model described in RFC 1122, but since the routing table
185 * and transmit implementation do not implement the Strong ES model,
186 * setting this to 1 results in an odd hybrid.
188 * XXX - ip_checkinterface currently must be disabled if you use ipnat
189 * to translate the destination address to another local interface.
191 * XXX - ip_checkinterface must be disabled if you add IP aliases
192 * to the loopback interface instead of the interface where the
193 * packets for those addresses are received.
195 static int ip_checkinterface = 0;
196 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
197 &ip_checkinterface, 0, "Verify packet arrives on correct interface");
199 static u_long ip_hash_count = 0;
200 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, hash_count, CTLFLAG_RD,
201 &ip_hash_count, 0, "Number of packets hashed by IP");
203 #ifdef RSS_DEBUG
204 static u_long ip_rehash_count = 0;
205 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, rehash_count, CTLFLAG_RD,
206 &ip_rehash_count, 0, "Number of packets rehashed by IP");
208 static u_long ip_dispatch_fast = 0;
209 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_fast_count, CTLFLAG_RD,
210 &ip_dispatch_fast, 0, "Number of packets handled on current CPU");
212 static u_long ip_dispatch_slow = 0;
213 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_slow_count, CTLFLAG_RD,
214 &ip_dispatch_slow, 0, "Number of packets messaged to another CPU");
215 #endif
217 #ifdef DIAGNOSTIC
218 static int ipprintfs = 0;
219 #endif
221 extern struct domain inetdomain;
222 extern struct protosw inetsw[];
223 u_char ip_protox[IPPROTO_MAX];
224 struct in_ifaddrhead in_ifaddrheads[MAXCPU]; /* first inet address */
225 struct in_ifaddrhashhead *in_ifaddrhashtbls[MAXCPU];
226 /* inet addr hash table */
227 u_long in_ifaddrhmask; /* mask for hash table */
229 static struct mbuf *ipforward_mtemp[MAXCPU];
231 struct ip_stats ipstats_percpu[MAXCPU] __cachealign;
233 static int
234 sysctl_ipstats(SYSCTL_HANDLER_ARGS)
236 int cpu, error = 0;
238 for (cpu = 0; cpu < ncpus; ++cpu) {
239 if ((error = SYSCTL_OUT(req, &ipstats_percpu[cpu],
240 sizeof(struct ip_stats))))
241 break;
242 if ((error = SYSCTL_IN(req, &ipstats_percpu[cpu],
243 sizeof(struct ip_stats))))
244 break;
247 return (error);
249 SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
250 0, 0, sysctl_ipstats, "S,ip_stats", "IP statistics");
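/*
 * A minimal userland sketch (illustration only, not part of this file) of
 * how the layout exported by sysctl_ipstats() above can be consumed: the
 * handler copies out one struct ip_stats per CPU back to back, so a reader
 * gets ncpus consecutive copies and must sum the counters itself.  This
 * assumes <netinet/ip_var.h> exposes struct ip_stats to userland and that
 * "net.inet.ip.stats" is the oid registered by the SYSCTL_PROC above; the
 * field name ips_total is taken from the code further below.
 */
#if 0	/* example only, never compiled into the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct ip_stats *st;
	size_t len = 0;
	u_long total = 0;
	int i, n;

	/* First call only sizes the buffer: ncpus * sizeof(struct ip_stats). */
	if (sysctlbyname("net.inet.ip.stats", NULL, &len, NULL, 0) < 0)
		return (1);
	st = malloc(len);
	if (st == NULL ||
	    sysctlbyname("net.inet.ip.stats", st, &len, NULL, 0) < 0)
		return (1);
	n = len / sizeof(*st);
	for (i = 0; i < n; i++)
		total += st[i].ips_total;	/* sum the per-CPU counters */
	printf("%d cpus, %lu total IP packets\n", n, total);
	free(st);
	return (0);
}
#endif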
252 /* Packet reassembly stuff */
253 #define IPREASS_NHASH_LOG2 6
254 #define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
255 #define IPREASS_HMASK (IPREASS_NHASH - 1)
256 #define IPREASS_HASH(x,y) \
257 (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
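/*
 * Illustration only: a standalone replica of IPREASS_HASH() above, showing
 * how a (source address, ip_id) pair picks one of the IPREASS_NHASH (64)
 * reassembly buckets -- the low nibbles of the two low-order bytes of the
 * address are combined and xor'ed with the id.  The names below are local
 * to the example, not kernel APIs.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdint.h>
#include <stdio.h>

static unsigned int
ipreass_bucket(uint32_t src, uint16_t id)
{
	return ((((src & 0xF) | (((src >> 8) & 0xF) << 4)) ^ id) & (64 - 1));
}

int
main(void)
{
	/* two ids from the same source land in different buckets */
	printf("bucket %u\n", ipreass_bucket(0x0100007f, 0x1234));
	printf("bucket %u\n", ipreass_bucket(0x0100007f, 0x1235));
	return (0);
}
#endif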
259 TAILQ_HEAD(ipqhead, ipq);
260 struct ipfrag_queue {
261 int nipq;
262 int timeo_inprog;
263 struct netmsg_base timeo_netmsg;
264 struct netmsg_base drain_netmsg;
265 struct ipqhead ipq[IPREASS_NHASH];
266 } __cachealign;
268 static struct ipfrag_queue ipfrag_queue_pcpu[MAXCPU];
270 #ifdef IPCTL_DEFMTU
271 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
272 &ip_mtu, 0, "Default MTU");
273 #endif
275 #ifdef IPSTEALTH
276 static int ipstealth = 0;
277 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW, &ipstealth, 0, "");
278 #else
279 static const int ipstealth = 0;
280 #endif
282 struct mbuf *(*ip_divert_p)(struct mbuf *, int, int);
284 struct pfil_head inet_pfil_hook;
287 * struct ip_srcrt_opt is used to store packet state while it travels
288 * through the stack.
290 * XXX Note that the code even makes assumptions on the size and
291 * alignment of fields inside struct ip_srcrt so e.g. adding some
292 * fields will break the code. This needs to be fixed.
294 * We need to save the IP options in case a protocol wants to respond
295 * to an incoming packet over the same route if the packet got here
296 * using IP source routing. This allows connection establishment and
297 * maintenance when the remote end is on a network that is not known
298 * to us.
300 struct ip_srcrt {
301 struct in_addr dst; /* final destination */
302 char nop; /* one NOP to align */
303 char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */
304 struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
307 struct ip_srcrt_opt {
308 int ip_nhops;
309 struct ip_srcrt ip_srcrt;
312 #define IPFRAG_MPIPE_MAX 4096
313 #define MAXIPFRAG_MIN ((IPFRAG_MPIPE_MAX * 2) / 256)
315 static MALLOC_DEFINE(M_IPQ, "ipq", "IP Fragment Management");
316 static struct malloc_pipe ipq_mpipe;
318 static void save_rte(struct mbuf *, u_char *, struct in_addr);
319 static int ip_dooptions(struct mbuf *m, int, struct sockaddr_in *);
320 static void ip_freef(struct ipfrag_queue *, struct ipqhead *,
321 struct ipq *);
322 static void ip_input_handler(netmsg_t);
324 static void ipfrag_timeo_dispatch(netmsg_t);
325 static void ipfrag_drain_dispatch(netmsg_t);
328 * IP initialization: fill in IP protocol switch table.
329 * All protocols not implemented in kernel go to raw IP protocol handler.
331 void
332 ip_init(void)
334 struct protosw *pr;
335 int cpu, i;
338 * Make sure we can handle a reasonable number of fragments but
339 * cap it at IPFRAG_MPIPE_MAX.
341 mpipe_init(&ipq_mpipe, M_IPQ, sizeof(struct ipq),
342 IFQ_MAXLEN, IPFRAG_MPIPE_MAX, 0, NULL, NULL, NULL);
343 for (cpu = 0; cpu < ncpus; ++cpu) {
344 TAILQ_INIT(&in_ifaddrheads[cpu]);
345 in_ifaddrhashtbls[cpu] =
346 hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask);
348 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
349 if (pr == NULL)
350 panic("ip_init");
351 for (i = 0; i < IPPROTO_MAX; i++)
352 ip_protox[i] = pr - inetsw;
353 for (pr = inetdomain.dom_protosw;
354 pr < inetdomain.dom_protoswNPROTOSW; pr++) {
355 if (pr->pr_domain->dom_family == PF_INET && pr->pr_protocol) {
356 if (pr->pr_protocol != IPPROTO_RAW)
357 ip_protox[pr->pr_protocol] = pr - inetsw;
361 inet_pfil_hook.ph_type = PFIL_TYPE_AF;
362 inet_pfil_hook.ph_af = AF_INET;
363 if ((i = pfil_head_register(&inet_pfil_hook)) != 0) {
364 kprintf("%s: WARNING: unable to register pfil hook, "
365 "error %d\n", __func__, i);
368 maxnipq = (nmbclusters / 32) / ncpus;
369 if (maxnipq < MAXIPFRAG_MIN)
370 maxnipq = MAXIPFRAG_MIN;
371 maxfragsperpacket = 16;
373 ip_id = time_second & 0xffff; /* time_second survives reboots */
375 for (cpu = 0; cpu < ncpus; ++cpu) {
377 * Initialize IP statistics counters for each CPU.
379 bzero(&ipstats_percpu[cpu], sizeof(struct ip_stats));
382 * Preallocate mbuf template for forwarding
384 MGETHDR(ipforward_mtemp[cpu], M_WAITOK, MT_DATA);
387 * Initialize per-cpu ip fragments queues
389 for (i = 0; i < IPREASS_NHASH; i++) {
390 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[cpu];
392 TAILQ_INIT(&fragq->ipq[i]);
393 netmsg_init(&fragq->timeo_netmsg, NULL,
394 &netisr_adone_rport, MSGF_PRIORITY,
395 ipfrag_timeo_dispatch);
396 netmsg_init(&fragq->drain_netmsg, NULL,
397 &netisr_adone_rport, MSGF_PRIORITY,
398 ipfrag_drain_dispatch);
402 netisr_register(NETISR_IP, ip_input_handler, ip_hashfn);
403 netisr_register_hashcheck(NETISR_IP, ip_hashcheck);
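/*
 * The ip_protox[] setup in ip_init() above follows a "default everything
 * to the raw handler, then override the protocols we implement" pattern,
 * per the comment at the top of the function.  A minimal standalone sketch
 * of that pattern (protocol numbers and handler names here are purely
 * illustrative, not the kernel's protosw machinery):
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>

#define EX_PROTO_MAX	256
#define EX_PROTO_TCP	6
#define EX_PROTO_UDP	17

static void ex_raw_input(int p) { printf("raw handler, proto %d\n", p); }
static void ex_tcp_input(int p) { printf("tcp handler, proto %d\n", p); }
static void ex_udp_input(int p) { printf("udp handler, proto %d\n", p); }

static void (*ex_protox[EX_PROTO_MAX])(int);

int
main(void)
{
	int i;

	for (i = 0; i < EX_PROTO_MAX; i++)	/* default: raw handler */
		ex_protox[i] = ex_raw_input;
	ex_protox[EX_PROTO_TCP] = ex_tcp_input;	/* override what we implement */
	ex_protox[EX_PROTO_UDP] = ex_udp_input;

	ex_protox[EX_PROTO_TCP](EX_PROTO_TCP);	/* dispatch as ip_input() does */
	ex_protox[41](41);			/* unimplemented -> raw */
	return (0);
}
#endif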
406 /* Do transport protocol processing. */
407 static void
408 transport_processing_oncpu(struct mbuf *m, int hlen, struct ip *ip)
410 const struct protosw *pr = &inetsw[ip_protox[ip->ip_p]];
413 * Switch out to protocol's input routine.
415 PR_GET_MPLOCK(pr);
416 pr->pr_input(&m, &hlen, ip->ip_p);
417 PR_REL_MPLOCK(pr);
420 static void
421 transport_processing_handler(netmsg_t msg)
423 struct netmsg_packet *pmsg = &msg->packet;
424 struct ip *ip;
425 int hlen;
427 ip = mtod(pmsg->nm_packet, struct ip *);
428 hlen = pmsg->base.lmsg.u.ms_result;
430 transport_processing_oncpu(pmsg->nm_packet, hlen, ip);
431 /* msg was embedded in the mbuf, do not reply! */
434 static void
435 ip_input_handler(netmsg_t msg)
437 ip_input(msg->packet.nm_packet);
438 /* msg was embedded in the mbuf, do not reply! */
442 * IP input routine. Checksum and byte swap header. If fragmented
443 * try to reassemble. Process options. Pass to next level.
445 void
446 ip_input(struct mbuf *m)
448 struct ip *ip;
449 struct in_ifaddr *ia = NULL;
450 struct in_ifaddr_container *iac;
451 int hlen, checkif;
452 u_short sum;
453 struct in_addr pkt_dst;
454 boolean_t using_srcrt = FALSE; /* forward (by PFIL_HOOKS) */
455 struct in_addr odst; /* original dst address(NAT) */
456 struct m_tag *mtag;
457 struct sockaddr_in *next_hop = NULL;
458 lwkt_port_t port;
459 #ifdef FAST_IPSEC
460 struct tdb_ident *tdbi;
461 struct secpolicy *sp;
462 int error;
463 #endif
465 M_ASSERTPKTHDR(m);
468 * This routine is called from numerous places which may not have
469 * characterized the packet.
471 ip = mtod(m, struct ip *);
472 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
473 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))) {
475 * Force hash recalculation for fragments and multicast
476 * packets; hardware may not do it correctly.
477 * XXX add flag to indicate the hash is from hardware
479 m->m_flags &= ~M_HASH;
481 if ((m->m_flags & M_HASH) == 0) {
482 ip_hashfn(&m, 0);
483 if (m == NULL)
484 return;
485 KKASSERT(m->m_flags & M_HASH);
487 if (&curthread->td_msgport !=
488 netisr_hashport(m->m_pkthdr.hash)) {
489 netisr_queue(NETISR_IP, m);
490 /* Requeued to other netisr msgport; done */
491 return;
494 /* mbuf could have been changed */
495 ip = mtod(m, struct ip *);
499 * Pull out certain tags
501 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
502 /* Next hop */
503 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
504 KKASSERT(mtag != NULL);
505 next_hop = m_tag_data(mtag);
508 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
509 /* dummynet already filtered us */
510 ip = mtod(m, struct ip *);
511 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
512 goto iphack;
515 ipstat.ips_total++;
517 /* length checks already done in ip_hashfn() */
518 KASSERT(m->m_len >= sizeof(struct ip), ("IP header not in one mbuf"));
520 if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
521 ipstat.ips_badvers++;
522 goto bad;
525 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
526 /* length checks already done in ip_hashfn() */
527 KASSERT(hlen >= sizeof(struct ip), ("IP header len too small"));
528 KASSERT(m->m_len >= hlen, ("complete IP header not in one mbuf"));
530 /* 127/8 must not appear on wire - RFC1122 */
531 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
532 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
533 if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK)) {
534 ipstat.ips_badaddr++;
535 goto bad;
539 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
540 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
541 } else {
542 if (hlen == sizeof(struct ip))
543 sum = in_cksum_hdr(ip);
544 else
545 sum = in_cksum(m, hlen);
547 if (sum != 0) {
548 ipstat.ips_badsum++;
549 goto bad;
552 #ifdef ALTQ
553 if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) {
554 /* packet is dropped by traffic conditioner */
555 return;
557 #endif
559 * Convert fields to host representation.
561 ip->ip_len = ntohs(ip->ip_len);
562 ip->ip_off = ntohs(ip->ip_off);
564 /* length checks already done in ip_hashfn() */
565 KASSERT(ip->ip_len >= hlen, ("total length less than header length"));
566 KASSERT(m->m_pkthdr.len >= ip->ip_len, ("mbuf too short"));
569 * Trim mbufs if longer than the IP header would have us expect.
571 if (m->m_pkthdr.len > ip->ip_len) {
572 if (m->m_len == m->m_pkthdr.len) {
573 m->m_len = ip->ip_len;
574 m->m_pkthdr.len = ip->ip_len;
575 } else {
576 m_adj(m, ip->ip_len - m->m_pkthdr.len);
579 #if defined(IPSEC) && !defined(IPSEC_FILTERGIF)
581 * Bypass packet filtering for packets from a tunnel (gif).
583 if (ipsec_gethist(m, NULL))
584 goto pass;
585 #endif
588 * IpHack's section.
589 * Right now, while no processing has been done on the packet
590 * and it is still fresh out of the network, we do our black
591 * deals with it.
592 * - Firewall: deny/allow/divert
593 * - Xlate: translate packet's addr/port (NAT).
594 * - Pipe: pass pkt through dummynet.
595 * - Wrap: fake packet's addr/port <unimpl.>
596 * - Encapsulate: put it in another IP and send out. <unimp.>
599 iphack:
601 * If we've been forwarded from the output side, then
602 * skip the firewall a second time
604 if (next_hop != NULL)
605 goto ours;
607 /* No pfil hooks */
608 if (!pfil_has_hooks(&inet_pfil_hook)) {
609 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
611 * Strip dummynet tags from stranded packets
613 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
614 KKASSERT(mtag != NULL);
615 m_tag_delete(m, mtag);
616 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
618 goto pass;
622 * Run through list of hooks for input packets.
624 * NOTE! If the packet is rewritten pf/ipfw/whoever must
625 * clear M_HASH.
627 odst = ip->ip_dst;
628 if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN))
629 return;
630 if (m == NULL) /* consumed by filter */
631 return;
632 ip = mtod(m, struct ip *);
633 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
634 using_srcrt = (odst.s_addr != ip->ip_dst.s_addr);
636 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
637 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
638 KKASSERT(mtag != NULL);
639 next_hop = m_tag_data(mtag);
641 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
642 ip_dn_queue(m);
643 return;
645 if (m->m_pkthdr.fw_flags & FW_MBUF_REDISPATCH) {
646 m->m_pkthdr.fw_flags &= ~FW_MBUF_REDISPATCH;
648 pass:
650 * Process options and, if not destined for us,
651 * ship it on. ip_dooptions returns 1 when an
652 * error was detected (causing an icmp message
653 * to be sent and the original packet to be freed).
655 if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, next_hop))
656 return;
658 /* Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
659 * matter whether it is destined for another node or is a multicast
660 * one; RSVP wants it and prevents it from being forwarded anywhere
661 * else. Also check that the RSVP daemon is running before grabbing
662 * the packet.
664 if (rsvp_on && ip->ip_p == IPPROTO_RSVP)
665 goto ours;
668 * Check our list of addresses, to see if the packet is for us.
669 * If we don't have any addresses, assume any unicast packet
670 * we receive might be for us (and let the upper layers deal
671 * with it).
673 if (TAILQ_EMPTY(&in_ifaddrheads[mycpuid]) &&
674 !(m->m_flags & (M_MCAST | M_BCAST)))
675 goto ours;
678 * Cache the destination address of the packet; this may be
679 * changed by use of 'ipfw fwd'.
681 pkt_dst = next_hop ? next_hop->sin_addr : ip->ip_dst;
684 * Enable a consistency check between the destination address
685 * and the arrival interface for a unicast packet (the RFC 1122
686 * strong ES model) if IP forwarding is disabled and the packet
687 * is not locally generated and the packet is not subject to
688 * 'ipfw fwd'.
690 * XXX - Checking also should be disabled if the destination
691 * address is ipnat'ed to a different interface.
693 * XXX - Checking is incompatible with IP aliases added
694 * to the loopback interface instead of the interface where
695 * the packets are received.
697 checkif = ip_checkinterface &&
698 !ipforwarding &&
699 m->m_pkthdr.rcvif != NULL &&
700 !(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
701 next_hop == NULL;
704 * Check for exact addresses in the hash bucket.
706 LIST_FOREACH(iac, INADDR_HASH(pkt_dst.s_addr), ia_hash) {
707 ia = iac->ia;
710 * If the address matches, verify that the packet
711 * arrived via the correct interface if checking is
712 * enabled.
714 if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr &&
715 (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
716 goto ours;
718 ia = NULL;
721 * Check for broadcast addresses.
723 * Only accept broadcast packets that arrive via the matching
724 * interface. Reception of forwarded directed broadcasts would
725 * be handled via ip_forward() and ether_output() with the loopback
726 * into the stack for SIMPLEX interfaces handled by ether_output().
728 if (m->m_pkthdr.rcvif != NULL &&
729 m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
730 struct ifaddr_container *ifac;
732 TAILQ_FOREACH(ifac, &m->m_pkthdr.rcvif->if_addrheads[mycpuid],
733 ifa_link) {
734 struct ifaddr *ifa = ifac->ifa;
736 if (ifa->ifa_addr == NULL) /* shutdown/startup race */
737 continue;
738 if (ifa->ifa_addr->sa_family != AF_INET)
739 continue;
740 ia = ifatoia(ifa);
741 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
742 pkt_dst.s_addr)
743 goto ours;
744 if (ia->ia_netbroadcast.s_addr == pkt_dst.s_addr)
745 goto ours;
746 #ifdef BOOTP_COMPAT
747 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
748 goto ours;
749 #endif
752 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
753 struct in_multi *inm;
755 if (ip_mrouter != NULL) {
756 /* XXX Multicast routing is not MPSAFE yet */
757 get_mplock();
760 * If we are acting as a multicast router, all
761 * incoming multicast packets are passed to the
762 * kernel-level multicast forwarding function.
763 * The packet is returned (relatively) intact; if
764 * ip_mforward() returns a non-zero value, the packet
765 * must be discarded, else it may be accepted below.
767 if (ip_mforward != NULL &&
768 ip_mforward(ip, m->m_pkthdr.rcvif, m, NULL) != 0) {
769 rel_mplock();
770 ipstat.ips_cantforward++;
771 m_freem(m);
772 return;
775 rel_mplock();
778 * The process-level routing daemon needs to receive
779 * all multicast IGMP packets, whether or not this
780 * host belongs to their destination groups.
782 if (ip->ip_p == IPPROTO_IGMP)
783 goto ours;
784 ipstat.ips_forward++;
787 * See if we belong to the destination multicast group on the
788 * arrival interface.
790 inm = IN_LOOKUP_MULTI(&ip->ip_dst, m->m_pkthdr.rcvif);
791 if (inm == NULL) {
792 ipstat.ips_notmember++;
793 m_freem(m);
794 return;
796 goto ours;
798 if (ip->ip_dst.s_addr == INADDR_BROADCAST)
799 goto ours;
800 if (ip->ip_dst.s_addr == INADDR_ANY)
801 goto ours;
804 * FAITH (Firewall Aided Internet Translator)
806 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
807 if (ip_keepfaith) {
808 if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
809 goto ours;
811 m_freem(m);
812 return;
816 * Not for us; forward if possible and desirable.
818 if (!ipforwarding) {
819 ipstat.ips_cantforward++;
820 m_freem(m);
821 } else {
822 #ifdef IPSEC
824 * Enforce inbound IPsec SPD.
826 if (ipsec4_in_reject(m, NULL)) {
827 ipsecstat.in_polvio++;
828 goto bad;
830 #endif
831 #ifdef FAST_IPSEC
832 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
833 crit_enter();
834 if (mtag != NULL) {
835 tdbi = (struct tdb_ident *)m_tag_data(mtag);
836 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
837 } else {
838 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
839 IP_FORWARDING, &error);
841 if (sp == NULL) { /* NB: can happen if error */
842 crit_exit();
843 /*XXX error stat???*/
844 DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/
845 goto bad;
849 * Check security policy against packet attributes.
851 error = ipsec_in_reject(sp, m);
852 KEY_FREESP(&sp);
853 crit_exit();
854 if (error) {
855 ipstat.ips_cantforward++;
856 goto bad;
858 #endif
859 ip_forward(m, using_srcrt, next_hop);
861 return;
863 ours:
866 * IPSTEALTH: Process non-routing options only
867 * if the packet is destined for us.
869 if (ipstealth &&
870 hlen > sizeof(struct ip) &&
871 ip_dooptions(m, 1, next_hop))
872 return;
874 /* Count the packet in the ip address stats */
875 if (ia != NULL) {
876 IFA_STAT_INC(&ia->ia_ifa, ipackets, 1);
877 IFA_STAT_INC(&ia->ia_ifa, ibytes, m->m_pkthdr.len);
881 * If offset or IP_MF are set, must reassemble.
882 * Otherwise, nothing need be done.
883 * (We could look in the reassembly queue to see
884 * if the packet was previously fragmented,
885 * but it's not worth the time; just let them time out.)
887 if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
889 * Attempt reassembly; if it succeeds, proceed. ip_reass()
890 * will return a different mbuf.
892 * NOTE: ip_reass() returns m with M_HASH cleared to force
893 * us to recharacterize the packet.
895 m = ip_reass(m);
896 if (m == NULL)
897 return;
898 ip = mtod(m, struct ip *);
900 /* Get the header length of the reassembled packet */
901 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
902 } else {
903 ip->ip_len -= hlen;
906 #ifdef IPSEC
908 * enforce IPsec policy checking if we are seeing last header.
909 * note that we do not visit this with protocols with pcb layer
910 * code - like udp/tcp/raw ip.
912 if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) &&
913 ipsec4_in_reject(m, NULL)) {
914 ipsecstat.in_polvio++;
915 goto bad;
917 #endif
918 #ifdef FAST_IPSEC
920 * enforce IPsec policy checking if we are seeing last header.
921 * note that we do not visit this with protocols with pcb layer
922 * code - like udp/tcp/raw ip.
924 if (inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) {
926 * Check if the packet has already had IPsec processing
927 * done. If so, then just pass it along. This tag gets
928 * set during AH, ESP, etc. input handling, before the
929 * packet is returned to the ip input queue for delivery.
931 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
932 crit_enter();
933 if (mtag != NULL) {
934 tdbi = (struct tdb_ident *)m_tag_data(mtag);
935 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
936 } else {
937 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
938 IP_FORWARDING, &error);
940 if (sp != NULL) {
942 * Check security policy against packet attributes.
944 error = ipsec_in_reject(sp, m);
945 KEY_FREESP(&sp);
946 } else {
947 /* XXX error stat??? */
948 error = EINVAL;
949 DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
950 crit_exit();
951 goto bad;
953 crit_exit();
954 if (error)
955 goto bad;
957 #endif /* FAST_IPSEC */
960 * We must forward the packet to the correct protocol thread if
961 * we are not already in it.
963 * NOTE: ip_len is now in host form. ip_len is not adjusted
964 * further for protocol processing, instead we pass hlen
965 * to the protosw and let it deal with it.
967 ipstat.ips_delivered++;
969 if ((m->m_flags & M_HASH) == 0) {
970 #ifdef RSS_DEBUG
971 atomic_add_long(&ip_rehash_count, 1);
972 #endif
973 ip->ip_len = htons(ip->ip_len + hlen);
974 ip->ip_off = htons(ip->ip_off);
976 ip_hashfn(&m, 0);
977 if (m == NULL)
978 return;
980 ip = mtod(m, struct ip *);
981 ip->ip_len = ntohs(ip->ip_len) - hlen;
982 ip->ip_off = ntohs(ip->ip_off);
983 KKASSERT(m->m_flags & M_HASH);
985 port = netisr_hashport(m->m_pkthdr.hash);
987 if (port != &curthread->td_msgport) {
988 struct netmsg_packet *pmsg;
990 #ifdef RSS_DEBUG
991 atomic_add_long(&ip_dispatch_slow, 1);
992 #endif
994 pmsg = &m->m_hdr.mh_netmsg;
995 netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
996 0, transport_processing_handler);
997 pmsg->nm_packet = m;
998 pmsg->base.lmsg.u.ms_result = hlen;
999 lwkt_sendmsg(port, &pmsg->base.lmsg);
1000 } else {
1001 #ifdef RSS_DEBUG
1002 atomic_add_long(&ip_dispatch_fast, 1);
1003 #endif
1004 transport_processing_oncpu(m, hlen, ip);
1006 return;
1008 bad:
1009 m_freem(m);
1013 * Take incoming datagram fragment and try to reassemble it into
1014 * whole datagram. If a chain for reassembly of this datagram already
1015 * exists, then it is given as fp; otherwise have to make a chain.
1017 struct mbuf *
1018 ip_reass(struct mbuf *m)
1020 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
1021 struct ip *ip = mtod(m, struct ip *);
1022 struct mbuf *p = NULL, *q, *nq;
1023 struct mbuf *n;
1024 struct ipq *fp = NULL;
1025 struct ipqhead *head;
1026 int hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1027 int i, next;
1028 u_short sum;
1030 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
1031 if (maxnipq == 0 || maxfragsperpacket == 0) {
1032 ipstat.ips_fragments++;
1033 ipstat.ips_fragdropped++;
1034 m_freem(m);
1035 return NULL;
1038 sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
1040 * Look for queue of fragments of this datagram.
1042 head = &fragq->ipq[sum];
1043 TAILQ_FOREACH(fp, head, ipq_list) {
1044 if (ip->ip_id == fp->ipq_id &&
1045 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
1046 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
1047 ip->ip_p == fp->ipq_p)
1048 goto found;
1051 fp = NULL;
1054 * Enforce upper bound on number of fragmented packets
1055 * for which we attempt reassembly;
1056 * If maxnipq is -1, accept all fragments without limitation.
1058 if (fragq->nipq > maxnipq && maxnipq > 0) {
1060 * drop something from the tail of the current queue
1061 * before proceeding further
1063 struct ipq *q = TAILQ_LAST(head, ipqhead);
1064 if (q == NULL) {
1066 * The current queue is empty,
1067 * so drop from one of the others.
1069 for (i = 0; i < IPREASS_NHASH; i++) {
1070 struct ipq *r = TAILQ_LAST(&fragq->ipq[i],
1071 ipqhead);
1072 if (r) {
1073 ipstat.ips_fragtimeout += r->ipq_nfrags;
1074 ip_freef(fragq, &fragq->ipq[i], r);
1075 break;
1078 } else {
1079 ipstat.ips_fragtimeout += q->ipq_nfrags;
1080 ip_freef(fragq, head, q);
1083 found:
1085 * Adjust ip_len to not reflect header,
1086 * convert offset of this to bytes.
1088 ip->ip_len -= hlen;
1089 if (ip->ip_off & IP_MF) {
1091 * Make sure that fragments have a data length
1092 * that's a non-zero multiple of 8 bytes.
1094 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
1095 ipstat.ips_toosmall++; /* XXX */
1096 m_freem(m);
1097 goto done;
1099 m->m_flags |= M_FRAG;
1100 } else {
1101 m->m_flags &= ~M_FRAG;
1103 ip->ip_off <<= 3;
1105 ipstat.ips_fragments++;
1106 m->m_pkthdr.header = ip;
1109 * If the hardware has not done csum over this fragment
1110 * then csum_data is not valid at all.
1112 if ((m->m_pkthdr.csum_flags & (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID))
1113 == (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID)) {
1114 m->m_pkthdr.csum_data = 0;
1115 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1119 * Presence of header sizes in mbufs
1120 * would confuse code below.
1122 m->m_data += hlen;
1123 m->m_len -= hlen;
1126 * If first fragment to arrive, create a reassembly queue.
1128 if (fp == NULL) {
1129 if ((fp = mpipe_alloc_nowait(&ipq_mpipe)) == NULL)
1130 goto dropfrag;
1131 TAILQ_INSERT_HEAD(head, fp, ipq_list);
1132 fragq->nipq++;
1133 fp->ipq_nfrags = 1;
1134 fp->ipq_ttl = IPFRAGTTL;
1135 fp->ipq_p = ip->ip_p;
1136 fp->ipq_id = ip->ip_id;
1137 fp->ipq_src = ip->ip_src;
1138 fp->ipq_dst = ip->ip_dst;
1139 fp->ipq_frags = m;
1140 m->m_nextpkt = NULL;
1141 goto inserted;
1143 fp->ipq_nfrags++;
1145 #define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
1148 * Find a segment which begins after this one does.
1150 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
1151 if (GETIP(q)->ip_off > ip->ip_off)
1152 break;
1156 * If there is a preceding segment, it may provide some of
1157 * our data already. If so, drop the data from the incoming
1158 * segment. If it provides all of our data, drop us, otherwise
1159 * stick new segment in the proper place.
1161 * If some of the data is dropped from the preceding
1162 * segment, then its checksum is invalidated.
1164 if (p) {
1165 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
1166 if (i > 0) {
1167 if (i >= ip->ip_len)
1168 goto dropfrag;
1169 m_adj(m, i);
1170 m->m_pkthdr.csum_flags = 0;
1171 ip->ip_off += i;
1172 ip->ip_len -= i;
1174 m->m_nextpkt = p->m_nextpkt;
1175 p->m_nextpkt = m;
1176 } else {
1177 m->m_nextpkt = fp->ipq_frags;
1178 fp->ipq_frags = m;
1182 * While we overlap succeeding segments trim them or,
1183 * if they are completely covered, dequeue them.
1185 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
1186 q = nq) {
1187 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
1188 if (i < GETIP(q)->ip_len) {
1189 GETIP(q)->ip_len -= i;
1190 GETIP(q)->ip_off += i;
1191 m_adj(q, i);
1192 q->m_pkthdr.csum_flags = 0;
1193 break;
1195 nq = q->m_nextpkt;
1196 m->m_nextpkt = nq;
1197 ipstat.ips_fragdropped++;
1198 fp->ipq_nfrags--;
1199 q->m_nextpkt = NULL;
1200 m_freem(q);
1203 inserted:
1205 * Check for complete reassembly and perform frag per packet
1206 * limiting.
1208 * Frag limiting is performed here so that the nth frag has
1209 * a chance to complete the packet before we drop the packet.
1210 * As a result, n+1 frags are actually allowed per packet, but
1211 * only n will ever be stored. (n = maxfragsperpacket.)
1214 next = 0;
1215 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
1216 if (GETIP(q)->ip_off != next) {
1217 if (fp->ipq_nfrags > maxfragsperpacket) {
1218 ipstat.ips_fragdropped += fp->ipq_nfrags;
1219 ip_freef(fragq, head, fp);
1221 goto done;
1223 next += GETIP(q)->ip_len;
1225 /* Make sure the last packet didn't have the IP_MF flag */
1226 if (p->m_flags & M_FRAG) {
1227 if (fp->ipq_nfrags > maxfragsperpacket) {
1228 ipstat.ips_fragdropped += fp->ipq_nfrags;
1229 ip_freef(fragq, head, fp);
1231 goto done;
1235 * Reassembly is complete. Make sure the packet is a sane size.
1237 q = fp->ipq_frags;
1238 ip = GETIP(q);
1239 if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
1240 ipstat.ips_toolong++;
1241 ipstat.ips_fragdropped += fp->ipq_nfrags;
1242 ip_freef(fragq, head, fp);
1243 goto done;
1247 * Concatenate fragments.
1249 m = q;
1250 n = m->m_next;
1251 m->m_next = NULL;
1252 m_cat(m, n);
1253 nq = q->m_nextpkt;
1254 q->m_nextpkt = NULL;
1255 for (q = nq; q != NULL; q = nq) {
1256 nq = q->m_nextpkt;
1257 q->m_nextpkt = NULL;
1258 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
1259 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
1260 m_cat(m, q);
1264 * Clean up the 1's complement checksum. Carry over 16 bits must
1265 * be added back. This assumes no more than 65535 packet fragments
1266 * were reassembled. A second carry can also occur (but not a third).
1268 m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
1269 (m->m_pkthdr.csum_data >> 16);
1270 if (m->m_pkthdr.csum_data > 0xFFFF)
1271 m->m_pkthdr.csum_data -= 0xFFFF;
1274 * Create header for new ip packet by
1275 * modifying header of first packet;
1276 * dequeue and discard fragment reassembly header.
1277 * Make header visible.
1279 ip->ip_len = next;
1280 ip->ip_src = fp->ipq_src;
1281 ip->ip_dst = fp->ipq_dst;
1282 TAILQ_REMOVE(head, fp, ipq_list);
1283 fragq->nipq--;
1284 mpipe_free(&ipq_mpipe, fp);
1285 m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
1286 m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
1287 /* some debugging cruft by sklower, below, will go away soon */
1288 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
1289 int plen = 0;
1291 for (n = m; n; n = n->m_next)
1292 plen += n->m_len;
1293 m->m_pkthdr.len = plen;
1297 * Reassembly complete, return the next protocol.
1299 * Be sure to clear M_HASH to force the packet
1300 * to be re-characterized.
1302 * Clear M_FRAG, we are no longer a fragment.
1304 m->m_flags &= ~(M_HASH | M_FRAG);
1306 ipstat.ips_reassembled++;
1307 return (m);
1309 dropfrag:
1310 ipstat.ips_fragdropped++;
1311 if (fp != NULL)
1312 fp->ipq_nfrags--;
1313 m_freem(m);
1314 done:
1315 return (NULL);
1317 #undef GETIP
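/*
 * Standalone illustration of the 1's-complement carry fold applied to
 * m_pkthdr.csum_data near the end of ip_reass() above: the per-fragment
 * partial checksums are summed into a 32-bit accumulator, so the carries
 * above bit 15 must be folded back in.  As the comment there notes, at
 * most two folds are ever needed.  csum_fold() is a local name for this
 * example, not a kernel function used by this file.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdint.h>
#include <stdio.h>

static uint16_t
csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries once */
	if (sum > 0xffff)			/* a second carry may remain */
		sum -= 0xffff;
	return ((uint16_t)sum);
}

int
main(void)
{
	/* three fragments whose partial sums overflow 16 bits */
	uint32_t acc = 0xfff0 + 0x8001 + 0x7fff;

	printf("accumulator 0x%x folds to 0x%04x\n", acc, csum_fold(acc));
	return (0);
}
#endif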
1321 * Free a fragment reassembly header and all
1322 * associated datagrams.
1324 static void
1325 ip_freef(struct ipfrag_queue *fragq, struct ipqhead *fhp, struct ipq *fp)
1327 struct mbuf *q;
1330 * Remove first to protect against blocking
1332 TAILQ_REMOVE(fhp, fp, ipq_list);
1335 * Clean out at our leisure
1337 while (fp->ipq_frags) {
1338 q = fp->ipq_frags;
1339 fp->ipq_frags = q->m_nextpkt;
1340 q->m_nextpkt = NULL;
1341 m_freem(q);
1343 mpipe_free(&ipq_mpipe, fp);
1344 fragq->nipq--;
1348 * If a timer expires on a reassembly queue, discard it.
1350 static void
1351 ipfrag_timeo_dispatch(netmsg_t nmsg)
1353 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
1354 struct ipq *fp, *fp_temp;
1355 struct ipqhead *head;
1356 int i;
1358 crit_enter();
1359 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1360 crit_exit();
1362 for (i = 0; i < IPREASS_NHASH; i++) {
1363 head = &fragq->ipq[i];
1364 TAILQ_FOREACH_MUTABLE(fp, head, ipq_list, fp_temp) {
1365 if (--fp->ipq_ttl == 0) {
1366 ipstat.ips_fragtimeout += fp->ipq_nfrags;
1367 ip_freef(fragq, head, fp);
1372 * If we are over the maximum number of fragments
1373 * (due to the limit being lowered), drain off
1374 * enough to get down to the new limit.
1376 if (maxnipq >= 0 && fragq->nipq > maxnipq) {
1377 for (i = 0; i < IPREASS_NHASH; i++) {
1378 head = &fragq->ipq[i];
1379 while (fragq->nipq > maxnipq && !TAILQ_EMPTY(head)) {
1380 ipstat.ips_fragdropped +=
1381 TAILQ_FIRST(head)->ipq_nfrags;
1382 ip_freef(fragq, head, TAILQ_FIRST(head));
1388 static void
1389 ipfrag_timeo_ipi(void *arg __unused)
1391 int cpu = mycpuid;
1392 struct lwkt_msg *msg = &ipfrag_queue_pcpu[cpu].timeo_netmsg.lmsg;
1394 ipfrag_queue_pcpu[cpu].timeo_inprog = 0;
1395 crit_enter();
1396 if (msg->ms_flags & MSGF_DONE)
1397 lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
1398 crit_exit();
1401 static void
1402 ipfrag_slowtimo(void)
1404 cpumask_t mask;
1405 int i;
1407 CPUMASK_ASSZERO(mask);
1408 for (i = 0; i < ncpus; ++i) {
1409 if (ipfrag_queue_pcpu[i].nipq &&
1410 ipfrag_queue_pcpu[i].timeo_inprog == 0) {
1411 ipfrag_queue_pcpu[i].timeo_inprog = 1;
1412 CPUMASK_ORBIT(mask, i);
1415 CPUMASK_ANDMASK(mask, smp_active_mask);
1416 if (CPUMASK_TESTNZERO(mask))
1417 lwkt_send_ipiq_mask(mask, ipfrag_timeo_ipi, NULL);
1421 * IP timer processing
1423 void
1424 ip_slowtimo(void)
1426 ipfrag_slowtimo();
1427 ipflow_slowtimo();
1431 * Drain off all datagram fragments.
1433 static void
1434 ipfrag_drain_dispatch(netmsg_t nmsg)
1436 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
1437 struct ipqhead *head;
1438 int i;
1440 crit_enter();
1441 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1442 crit_exit();
1444 for (i = 0; i < IPREASS_NHASH; i++) {
1445 head = &fragq->ipq[i];
1446 while (!TAILQ_EMPTY(head)) {
1447 ipstat.ips_fragdropped += TAILQ_FIRST(head)->ipq_nfrags;
1448 ip_freef(fragq, head, TAILQ_FIRST(head));
1453 static void
1454 ipfrag_drain_ipi(void *arg __unused)
1456 int cpu = mycpuid;
1457 struct lwkt_msg *msg = &ipfrag_queue_pcpu[cpu].drain_netmsg.lmsg;
1459 crit_enter();
1460 if (msg->ms_flags & MSGF_DONE)
1461 lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
1462 crit_exit();
1465 static void
1466 ipfrag_drain(void)
1468 cpumask_t mask;
1470 CPUMASK_ASSBMASK(mask, ncpus);
1471 CPUMASK_ANDMASK(mask, smp_active_mask);
1472 if (CPUMASK_TESTNZERO(mask))
1473 lwkt_send_ipiq_mask(mask, ipfrag_drain_ipi, NULL);
1476 void
1477 ip_drain(void)
1479 ipfrag_drain();
1480 in_rtqdrain();
1484 * Do option processing on a datagram,
1485 * possibly discarding it if bad options are encountered,
1486 * or forwarding it if source-routed.
1487 * The pass argument is used when operating in the IPSTEALTH
1488 * mode to tell what options to process:
1489 * [LS]SRR (pass 0) or the others (pass 1).
1490 * The reason for as many as two passes is that when doing IPSTEALTH,
1491 * non-routing options should be processed only if the packet is for us.
1492 * Returns 1 if packet has been forwarded/freed,
1493 * 0 if the packet should be processed further.
1495 static int
1496 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
1498 struct sockaddr_in ipaddr = { sizeof ipaddr, AF_INET };
1499 struct ip *ip = mtod(m, struct ip *);
1500 u_char *cp;
1501 struct in_ifaddr *ia;
1502 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB;
1503 boolean_t forward = FALSE;
1504 struct in_addr *sin, dst;
1505 n_time ntime;
1507 dst = ip->ip_dst;
1508 cp = (u_char *)(ip + 1);
1509 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
1510 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1511 opt = cp[IPOPT_OPTVAL];
1512 if (opt == IPOPT_EOL)
1513 break;
1514 if (opt == IPOPT_NOP)
1515 optlen = 1;
1516 else {
1517 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
1518 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1519 goto bad;
1521 optlen = cp[IPOPT_OLEN];
1522 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
1523 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1524 goto bad;
1527 switch (opt) {
1529 default:
1530 break;
1533 * Source routing with record.
1534 * Find interface with current destination address.
1535 * If none on this machine then drop if strictly routed,
1536 * or do nothing if loosely routed.
1537 * Record interface address and bring up next address
1538 * component. If strictly routed make sure next
1539 * address is on directly accessible net.
1541 case IPOPT_LSRR:
1542 case IPOPT_SSRR:
1543 if (ipstealth && pass > 0)
1544 break;
1545 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1546 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1547 goto bad;
1549 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1550 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1551 goto bad;
1553 ipaddr.sin_addr = ip->ip_dst;
1554 ia = (struct in_ifaddr *)
1555 ifa_ifwithaddr((struct sockaddr *)&ipaddr);
1556 if (ia == NULL) {
1557 if (opt == IPOPT_SSRR) {
1558 type = ICMP_UNREACH;
1559 code = ICMP_UNREACH_SRCFAIL;
1560 goto bad;
1562 if (!ip_dosourceroute)
1563 goto nosourcerouting;
1565 * Loose routing, and not at next destination
1566 * yet; nothing to do except forward.
1568 break;
1570 off--; /* 0 origin */
1571 if (off > optlen - (int)sizeof(struct in_addr)) {
1573 * End of source route. Should be for us.
1575 if (!ip_acceptsourceroute)
1576 goto nosourcerouting;
1577 save_rte(m, cp, ip->ip_src);
1578 break;
1580 if (ipstealth)
1581 goto dropit;
1582 if (!ip_dosourceroute) {
1583 if (ipforwarding) {
1584 char sbuf[INET_ADDRSTRLEN];
1585 char dbuf[INET_ADDRSTRLEN];
1588 * Acting as a router, so generate ICMP
1590 nosourcerouting:
1591 log(LOG_WARNING,
1592 "attempted source route from %s to %s\n",
1593 kinet_ntoa(ip->ip_src, sbuf),
1594 kinet_ntoa(ip->ip_dst, dbuf));
1595 type = ICMP_UNREACH;
1596 code = ICMP_UNREACH_SRCFAIL;
1597 goto bad;
1598 } else {
1600 * Not acting as a router,
1601 * so silently drop.
1603 dropit:
1604 ipstat.ips_cantforward++;
1605 m_freem(m);
1606 return (1);
1611 * locate outgoing interface
1613 memcpy(&ipaddr.sin_addr, cp + off,
1614 sizeof ipaddr.sin_addr);
1616 if (opt == IPOPT_SSRR) {
1617 #define INA struct in_ifaddr *
1618 #define SA struct sockaddr *
1619 if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr))
1620 == NULL)
1621 ia = (INA)ifa_ifwithnet((SA)&ipaddr);
1622 } else {
1623 ia = ip_rtaddr(ipaddr.sin_addr, NULL);
1625 if (ia == NULL) {
1626 type = ICMP_UNREACH;
1627 code = ICMP_UNREACH_SRCFAIL;
1628 goto bad;
1630 ip->ip_dst = ipaddr.sin_addr;
1631 memcpy(cp + off, &IA_SIN(ia)->sin_addr,
1632 sizeof(struct in_addr));
1633 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1635 * Let ip_intr's mcast routing check handle mcast pkts
1637 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
1638 break;
1640 case IPOPT_RR:
1641 if (ipstealth && pass == 0)
1642 break;
1643 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1644 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1645 goto bad;
1647 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1648 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1649 goto bad;
1652 * If no space remains, ignore.
1654 off--; /* 0 origin */
1655 if (off > optlen - (int)sizeof(struct in_addr))
1656 break;
1657 memcpy(&ipaddr.sin_addr, &ip->ip_dst,
1658 sizeof ipaddr.sin_addr);
1660 * locate outgoing interface; if we're the destination,
1661 * use the incoming interface (should be same).
1663 if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == NULL &&
1664 (ia = ip_rtaddr(ipaddr.sin_addr, NULL)) == NULL) {
1665 type = ICMP_UNREACH;
1666 code = ICMP_UNREACH_HOST;
1667 goto bad;
1669 memcpy(cp + off, &IA_SIN(ia)->sin_addr,
1670 sizeof(struct in_addr));
1671 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1672 break;
1674 case IPOPT_TS:
1675 if (ipstealth && pass == 0)
1676 break;
1677 code = cp - (u_char *)ip;
1678 if (optlen < 4 || optlen > 40) {
1679 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1680 goto bad;
1682 if ((off = cp[IPOPT_OFFSET]) < 5) {
1683 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1684 goto bad;
1686 if (off > optlen - (int)sizeof(int32_t)) {
1687 cp[IPOPT_OFFSET + 1] += (1 << 4);
1688 if ((cp[IPOPT_OFFSET + 1] & 0xf0) == 0) {
1689 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1690 goto bad;
1692 break;
1694 off--; /* 0 origin */
1695 sin = (struct in_addr *)(cp + off);
1696 switch (cp[IPOPT_OFFSET + 1] & 0x0f) {
1698 case IPOPT_TS_TSONLY:
1699 break;
1701 case IPOPT_TS_TSANDADDR:
1702 if (off + sizeof(n_time) +
1703 sizeof(struct in_addr) > optlen) {
1704 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1705 goto bad;
1707 ipaddr.sin_addr = dst;
1708 ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
1709 m->m_pkthdr.rcvif);
1710 if (ia == NULL)
1711 continue;
1712 memcpy(sin, &IA_SIN(ia)->sin_addr,
1713 sizeof(struct in_addr));
1714 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1715 off += sizeof(struct in_addr);
1716 break;
1718 case IPOPT_TS_PRESPEC:
1719 if (off + sizeof(n_time) +
1720 sizeof(struct in_addr) > optlen) {
1721 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1722 goto bad;
1724 memcpy(&ipaddr.sin_addr, sin,
1725 sizeof(struct in_addr));
1726 if (ifa_ifwithaddr((SA)&ipaddr) == NULL)
1727 continue;
1728 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1729 off += sizeof(struct in_addr);
1730 break;
1732 default:
1733 code = &cp[IPOPT_OFFSET + 1] - (u_char *)ip;
1734 goto bad;
1736 ntime = iptime();
1737 memcpy(cp + off, &ntime, sizeof(n_time));
1738 cp[IPOPT_OFFSET] += sizeof(n_time);
1741 if (forward && ipforwarding) {
1742 ip_forward(m, TRUE, next_hop);
1743 return (1);
1745 return (0);
1746 bad:
1747 icmp_error(m, type, code, 0, 0);
1748 ipstat.ips_badoptions++;
1749 return (1);
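/*
 * Minimal standalone sketch of the option walk that ip_dooptions() above
 * performs: a one-byte EOL ends the list, a one-byte NOP is padding, and
 * every other option carries a length byte covering type + length + data.
 * The constants mirror the canonical IP option byte layout; this is an
 * illustration, not kernel code.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>

#define EX_OPT_EOL	0	/* end of option list */
#define EX_OPT_NOP	1	/* no-operation (padding) */

static void
walk_options(const unsigned char *cp, int cnt)
{
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == EX_OPT_EOL)
			break;
		if (opt == EX_OPT_NOP) {
			optlen = 1;
		} else {
			if (cnt < 2)
				return;		/* truncated option */
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				return;		/* malformed length */
		}
		printf("option %d, length %d\n", opt, optlen);
	}
}

int
main(void)
{
	/* NOP, NOP, then a Record Route option (type 7) of length 7 */
	const unsigned char opts[] = { 1, 1, 7, 7, 4, 0, 0, 0, 0 };

	walk_options(opts, (int)sizeof(opts));
	return (0);
}
#endif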
1753 * Given address of next destination (final or next hop),
1754 * return internet address info of interface to be used to get there.
1756 struct in_ifaddr *
1757 ip_rtaddr(struct in_addr dst, struct route *ro0)
1759 struct route sro, *ro;
1760 struct sockaddr_in *sin;
1761 struct in_ifaddr *ia;
1763 if (ro0 != NULL) {
1764 ro = ro0;
1765 } else {
1766 bzero(&sro, sizeof(sro));
1767 ro = &sro;
1770 sin = (struct sockaddr_in *)&ro->ro_dst;
1772 if (ro->ro_rt == NULL || dst.s_addr != sin->sin_addr.s_addr) {
1773 if (ro->ro_rt != NULL) {
1774 RTFREE(ro->ro_rt);
1775 ro->ro_rt = NULL;
1777 sin->sin_family = AF_INET;
1778 sin->sin_len = sizeof *sin;
1779 sin->sin_addr = dst;
1780 rtalloc_ign(ro, RTF_PRCLONING);
1783 if (ro->ro_rt == NULL)
1784 return (NULL);
1786 ia = ifatoia(ro->ro_rt->rt_ifa);
1788 if (ro == &sro)
1789 RTFREE(ro->ro_rt);
1790 return ia;
1794 * Save incoming source route for use in replies,
1795 * to be picked up later by ip_srcroute if the receiver is interested.
1797 static void
1798 save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
1800 struct m_tag *mtag;
1801 struct ip_srcrt_opt *opt;
1802 unsigned olen;
1804 mtag = m_tag_get(PACKET_TAG_IPSRCRT, sizeof(*opt), M_NOWAIT);
1805 if (mtag == NULL)
1806 return;
1807 opt = m_tag_data(mtag);
1809 olen = option[IPOPT_OLEN];
1810 #ifdef DIAGNOSTIC
1811 if (ipprintfs)
1812 kprintf("save_rte: olen %d\n", olen);
1813 #endif
1814 if (olen > sizeof(opt->ip_srcrt) - (1 + sizeof(dst))) {
1815 m_tag_free(mtag);
1816 return;
1818 bcopy(option, opt->ip_srcrt.srcopt, olen);
1819 opt->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
1820 opt->ip_srcrt.dst = dst;
1821 m_tag_prepend(m, mtag);
1825 * Retrieve incoming source route for use in replies,
1826 * in the same form used by setsockopt.
1827 * The first hop is placed before the options and will be removed later.
1829 struct mbuf *
1830 ip_srcroute(struct mbuf *m0)
1832 struct in_addr *p, *q;
1833 struct mbuf *m;
1834 struct m_tag *mtag;
1835 struct ip_srcrt_opt *opt;
1837 if (m0 == NULL)
1838 return NULL;
1840 mtag = m_tag_find(m0, PACKET_TAG_IPSRCRT, NULL);
1841 if (mtag == NULL)
1842 return NULL;
1843 opt = m_tag_data(mtag);
1845 if (opt->ip_nhops == 0)
1846 return (NULL);
1847 m = m_get(M_NOWAIT, MT_HEADER);
1848 if (m == NULL)
1849 return (NULL);
1851 #define OPTSIZ (sizeof(opt->ip_srcrt.nop) + sizeof(opt->ip_srcrt.srcopt))
1853 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
1854 m->m_len = opt->ip_nhops * sizeof(struct in_addr) +
1855 sizeof(struct in_addr) + OPTSIZ;
1856 #ifdef DIAGNOSTIC
1857 if (ipprintfs) {
1858 kprintf("ip_srcroute: nhops %d mlen %d",
1859 opt->ip_nhops, m->m_len);
1861 #endif
1864 * First save first hop for return route
1866 p = &opt->ip_srcrt.route[opt->ip_nhops - 1];
1867 *(mtod(m, struct in_addr *)) = *p--;
1868 #ifdef DIAGNOSTIC
1869 if (ipprintfs)
1870 kprintf(" hops %x", ntohl(mtod(m, struct in_addr *)->s_addr));
1871 #endif
1874 * Copy option fields and padding (nop) to mbuf.
1876 opt->ip_srcrt.nop = IPOPT_NOP;
1877 opt->ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
1878 memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &opt->ip_srcrt.nop,
1879 OPTSIZ);
1880 q = (struct in_addr *)(mtod(m, caddr_t) +
1881 sizeof(struct in_addr) + OPTSIZ);
1882 #undef OPTSIZ
1884 * Record return path as an IP source route,
1885 * reversing the path (pointers are now aligned).
1887 while (p >= opt->ip_srcrt.route) {
1888 #ifdef DIAGNOSTIC
1889 if (ipprintfs)
1890 kprintf(" %x", ntohl(q->s_addr));
1891 #endif
1892 *q++ = *p--;
1895 * Last hop goes to final destination.
1897 *q = opt->ip_srcrt.dst;
1898 m_tag_delete(m0, mtag);
1899 #ifdef DIAGNOSTIC
1900 if (ipprintfs)
1901 kprintf(" %x\n", ntohl(q->s_addr));
1902 #endif
1903 return (m);
1907 * Strip out IP options.
1909 void
1910 ip_stripoptions(struct mbuf *m)
1912 int datalen;
1913 struct ip *ip = mtod(m, struct ip *);
1914 caddr_t opts;
1915 int optlen;
1917 optlen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
1918 opts = (caddr_t)(ip + 1);
1919 datalen = m->m_len - (sizeof(struct ip) + optlen);
1920 bcopy(opts + optlen, opts, datalen);
1921 m->m_len -= optlen;
1922 if (m->m_flags & M_PKTHDR)
1923 m->m_pkthdr.len -= optlen;
1924 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
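/*
 * The _IP_VHL form used throughout this file packs the IP version and the
 * header length (in 32-bit words) into a single byte: header bytes are
 * recovered as IP_VHL_HL(ip->ip_vhl) << 2 and the field is rebuilt with
 * IP_MAKE_VHL(), as ip_stripoptions() does just above.  A standalone
 * sketch of the same nibble packing, with local macros mirroring what
 * <netinet/ip.h> provides under _IP_VHL:
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>

#define EX_VHL_V(vhl)		((vhl) >> 4)	/* version nibble */
#define EX_VHL_HL(vhl)		((vhl) & 0x0f)	/* header len, 32-bit words */
#define EX_MAKE_VHL(v, hl)	(((v) << 4) | (hl))

int
main(void)
{
	unsigned char vhl;

	vhl = EX_MAKE_VHL(4, 6);	/* IPv4, 24-byte header (with options) */
	printf("version %u, header %u bytes\n",
	    EX_VHL_V(vhl), EX_VHL_HL(vhl) << 2);

	vhl = EX_MAKE_VHL(4, 5);	/* options stripped: back to 20 bytes */
	printf("version %u, header %u bytes\n",
	    EX_VHL_V(vhl), EX_VHL_HL(vhl) << 2);
	return (0);
}
#endif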
1927 u_char inetctlerrmap[PRC_NCMDS] = {
1928 0, 0, 0, 0,
1929 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
1930 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
1931 EMSGSIZE, EHOSTUNREACH, 0, 0,
1932 0, 0, 0, 0,
1933 ENOPROTOOPT, ECONNREFUSED
1937 * Forward a packet. If some error occurs return the sender
1938 * an icmp packet. Note we can't always generate a meaningful
1939 * icmp message because icmp doesn't have a large enough repertoire
1940 * of codes and types.
1942 * If not forwarding, just drop the packet. This could be confusing
1943 * if ipforwarding was zero but some routing protocol was advancing
1944 * us as a gateway to somewhere. However, we must let the routing
1945 * protocol deal with that.
1947 * The using_srcrt parameter indicates whether the packet is being forwarded
1948 * via a source route.
1950 void
1951 ip_forward(struct mbuf *m, boolean_t using_srcrt, struct sockaddr_in *next_hop)
1953 struct ip *ip = mtod(m, struct ip *);
1954 struct rtentry *rt;
1955 struct route fwd_ro;
1956 int error, type = 0, code = 0, destmtu = 0;
1957 struct mbuf *mcopy, *mtemp = NULL;
1958 n_long dest;
1959 struct in_addr pkt_dst;
1961 dest = INADDR_ANY;
1963 * Cache the destination address of the packet; this may be
1964 * changed by use of 'ipfw fwd'.
1966 pkt_dst = (next_hop != NULL) ? next_hop->sin_addr : ip->ip_dst;
1968 #ifdef DIAGNOSTIC
1969 if (ipprintfs)
1970 kprintf("forward: src %x dst %x ttl %x\n",
1971 ip->ip_src.s_addr, pkt_dst.s_addr, ip->ip_ttl);
1972 #endif
1974 if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
1975 ipstat.ips_cantforward++;
1976 m_freem(m);
1977 return;
1979 if (!ipstealth && ip->ip_ttl <= IPTTLDEC) {
1980 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
1981 return;
1984 bzero(&fwd_ro, sizeof(fwd_ro));
1985 ip_rtaddr(pkt_dst, &fwd_ro);
1986 if (fwd_ro.ro_rt == NULL) {
1987 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
1988 return;
1990 rt = fwd_ro.ro_rt;
1992 if (curthread->td_type == TD_TYPE_NETISR) {
1994 * Save the IP header and at most 8 bytes of the payload,
1995 * in case we need to generate an ICMP message to the src.
1997 mtemp = ipforward_mtemp[mycpuid];
1998 KASSERT((mtemp->m_flags & M_EXT) == 0 &&
1999 mtemp->m_data == mtemp->m_pktdat &&
2000 m_tag_first(mtemp) == NULL,
2001 ("ip_forward invalid mtemp1"));
2003 if (!m_dup_pkthdr(mtemp, m, M_NOWAIT)) {
2005 * It's probably ok if the pkthdr dup fails (because
2006 * the deep copy of the tag chain failed), but for now
2007 * be conservative and just discard the copy since
2008 * code below may some day want the tags.
2010 mtemp = NULL;
2011 } else {
2012 mtemp->m_type = m->m_type;
2013 mtemp->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
2014 (int)ip->ip_len);
2015 mtemp->m_pkthdr.len = mtemp->m_len;
2016 m_copydata(m, 0, mtemp->m_len, mtod(mtemp, caddr_t));
2020 if (!ipstealth)
2021 ip->ip_ttl -= IPTTLDEC;
2024 * If forwarding packet using same interface that it came in on,
2025 * perhaps should send a redirect to sender to shortcut a hop.
2026 * Only send redirect if source is sending directly to us,
2027 * and if packet was not source routed (or has any options).
2028 * Also, don't send redirect if forwarding using a default route
2029 * or a route modified by a redirect.
2031 if (rt->rt_ifp == m->m_pkthdr.rcvif &&
2032 !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
2033 satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
2034 ipsendredirects && !using_srcrt && next_hop == NULL) {
2035 u_long src = ntohl(ip->ip_src.s_addr);
2036 struct in_ifaddr *rt_ifa = (struct in_ifaddr *)rt->rt_ifa;
2038 if (rt_ifa != NULL &&
2039 (src & rt_ifa->ia_subnetmask) == rt_ifa->ia_subnet) {
2040 if (rt->rt_flags & RTF_GATEWAY)
2041 dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
2042 else
2043 dest = pkt_dst.s_addr;
2045 * Router requirements says to only send
2046 * host redirects.
2048 type = ICMP_REDIRECT;
2049 code = ICMP_REDIRECT_HOST;
2050 #ifdef DIAGNOSTIC
2051 if (ipprintfs)
2052 kprintf("redirect (%d) to %x\n", code, dest);
2053 #endif
2057 error = ip_output(m, NULL, &fwd_ro, IP_FORWARDING, NULL, NULL);
2058 if (error == 0) {
2059 ipstat.ips_forward++;
2060 if (type == 0) {
			if (mtemp)
				ipflow_create(&fwd_ro, mtemp);
			goto done;
		}
		ipstat.ips_redirectsent++;
	} else {
		ipstat.ips_cantforward++;
	}

	if (mtemp == NULL)
		goto done;

	/*
	 * Errors that do not require generating an ICMP message.
	 */
	switch (error) {
	case ENOBUFS:
		/*
		 * A router should not generate ICMP_SOURCEQUENCH as
		 * required in RFC1812 Requirements for IP Version 4 Routers.
		 * Source quench could be a big problem under DoS attacks,
		 * or if the underlying interface is rate-limited.
		 * Those who need source quench packets may re-enable them
		 * via the net.inet.ip.sendsourcequench sysctl.
		 */
		if (!ip_sendsourcequench)
			goto done;
		break;

	case EACCES:			/* ipfw denied packet */
		goto done;
	}

	KASSERT((mtemp->m_flags & M_EXT) == 0 &&
	    mtemp->m_data == mtemp->m_pktdat,
	    ("ip_forward invalid mtemp2"));
	mcopy = m_copym(mtemp, 0, mtemp->m_len, M_NOWAIT);
	if (mcopy == NULL)
		goto done;

	/*
	 * Send ICMP message.
	 */
	switch (error) {
	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:		/* shouldn't happen, checked above */
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;
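		/*
		 * The MTU passed to icmp_error() below ends up in the
		 * "fragmentation needed" message and drives the sender's
		 * path MTU discovery (RFC 1191).
		 */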
#ifdef IPSEC
		/*
		 * If the packet is routed over IPsec tunnel, tell the
		 * originator the tunnel MTU.
		 *	tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
		 * XXX quickhack!!!
		 */
		if (fwd_ro.ro_rt != NULL) {
			struct secpolicy *sp = NULL;
			int ipsecerror;
			int ipsechdr;
			struct route *ro;

			sp = ipsec4_getpolicybyaddr(mcopy,
						    IPSEC_DIR_OUTBOUND,
						    IP_FORWARDING,
						    &ipsecerror);

			if (sp == NULL)
				destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
			else {
				/* count IPsec header size */
				ipsechdr = ipsec4_hdrsiz(mcopy,
							 IPSEC_DIR_OUTBOUND,
							 NULL);

				/*
				 * find the correct route for outer IPv4
				 * header, compute tunnel MTU.
				 */
				if (sp->req != NULL && sp->req->sav != NULL &&
				    sp->req->sav->sah != NULL) {
					ro = &sp->req->sav->sah->sa_route;
					if (ro->ro_rt != NULL &&
					    ro->ro_rt->rt_ifp != NULL) {
						destmtu =
						    ro->ro_rt->rt_ifp->if_mtu;
						destmtu -= ipsechdr;
					}
				}

				key_freesp(sp);
			}
		}
#elif defined(FAST_IPSEC)
		/*
		 * If the packet is routed over IPsec tunnel, tell the
		 * originator the tunnel MTU.
		 *	tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
		 * XXX quickhack!!!
		 */
		if (fwd_ro.ro_rt != NULL) {
			struct secpolicy *sp = NULL;
			int ipsecerror;
			int ipsechdr;
			struct route *ro;

			sp = ipsec_getpolicybyaddr(mcopy,
						   IPSEC_DIR_OUTBOUND,
						   IP_FORWARDING,
						   &ipsecerror);

			if (sp == NULL)
				destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
			else {
				/* count IPsec header size */
				ipsechdr = ipsec4_hdrsiz(mcopy,
							 IPSEC_DIR_OUTBOUND,
							 NULL);

				/*
				 * find the correct route for outer IPv4
				 * header, compute tunnel MTU.
				 */
				if (sp->req != NULL &&
				    sp->req->sav != NULL &&
				    sp->req->sav->sah != NULL) {
					ro = &sp->req->sav->sah->sa_route;
					if (ro->ro_rt != NULL &&
					    ro->ro_rt->rt_ifp != NULL) {
						destmtu =
						    ro->ro_rt->rt_ifp->if_mtu;
						destmtu -= ipsechdr;
					}
				}

				KEY_FREESP(&sp);
			}
		}
#else /* !IPSEC && !FAST_IPSEC */
		if (fwd_ro.ro_rt != NULL)
			destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
#endif /*IPSEC*/
		ipstat.ips_cantfrag++;
		break;

	case ENOBUFS:
		type = ICMP_SOURCEQUENCH;
		code = 0;
		break;

	case EACCES:			/* ipfw denied packet */
		panic("ip_forward EACCES should not reach");
	}
	icmp_error(mcopy, type, code, dest, destmtu);
done:
	if (mtemp != NULL)
		m_tag_delete_chain(mtemp);
	if (fwd_ro.ro_rt != NULL)
		RTFREE(fwd_ro.ro_rt);
}

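/*
 * Append control messages (cmsgs) describing the received datagram to the
 * chain at *mp, as selected by the receiving socket's options: SO_TIMESTAMP,
 * IP_RECVDSTADDR, IP_RECVTTL and IP_RECVIF.
 */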
void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
	       struct mbuf *m)
{
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		microtime(&tv);
		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
		    SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
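	/*
	 * For IP_RECVIF, build a sockaddr_dl naming the interface the
	 * packet arrived on; if the interface or its link-level address
	 * is unusable, fall back to an empty dummy sockaddr_dl.
	 */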
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if (((ifp = m->m_pkthdr.rcvif)) &&
		    ((ifp->if_index != 0) && (ifp->if_index <= if_index))) {
			sdp = IF_LLSOCKADDR(ifp);
			/*
			 * Change our mind and don't try to copy.
			 */
			if ((sdp->sdl_family != AF_LINK) ||
			    (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
}

/*
 * XXX these routines are called from the upper part of the kernel.
 *
 * They could also be moved to ip_mroute.c, since all the RSVP
 * handling is done there already.
 */
int
ip_rsvp_init(struct socket *so)
{
	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (ip_rsvpd != NULL)
		return EADDRINUSE;

	ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!ip_rsvp_on) {
		ip_rsvp_on = 1;
		rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{
	ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (ip_rsvp_on) {
		ip_rsvp_on = 0;
		rsvp_on--;
	}
	return 0;
}

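/*
 * RSVP packets are dispatched in this order: to an externally registered
 * handler (rsvp_input_p) if one is loaded, otherwise to the RSVP raw
 * socket via rip_input() when a daemon is attached, otherwise they are
 * dropped.
 */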
int
rsvp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;

	*mp = NULL;

	if (rsvp_input_p) { /* call the real one if loaded */
		*mp = m;
		rsvp_input_p(mp, offp, proto);
		return(IPPROTO_DONE);
	}

	/* Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!rsvp_on) {
		m_freem(m);
		return(IPPROTO_DONE);
	}

	if (ip_rsvpd != NULL) {
		*mp = m;
		rip_input(mp, offp, proto);
		return(IPPROTO_DONE);
	}
	/* Drop the packet */
	m_freem(m);
	return(IPPROTO_DONE);
}