sys/netinet/ip_input.c
1 /*
2 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Jeffrey M. Hsu.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1988, 1993
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
62 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
63 * $FreeBSD: src/sys/netinet/ip_input.c,v 1.130.2.52 2003/03/07 07:01:28 silby Exp $
66 #define _IP_VHL
68 #include "opt_bootp.h"
69 #include "opt_ipdn.h"
70 #include "opt_ipdivert.h"
71 #include "opt_ipstealth.h"
72 #include "opt_ipsec.h"
73 #include "opt_rss.h"
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/mpipe.h>
80 #include <sys/domain.h>
81 #include <sys/protosw.h>
82 #include <sys/socket.h>
83 #include <sys/time.h>
84 #include <sys/globaldata.h>
85 #include <sys/thread.h>
86 #include <sys/kernel.h>
87 #include <sys/syslog.h>
88 #include <sys/sysctl.h>
89 #include <sys/in_cksum.h>
90 #include <sys/lock.h>
92 #include <sys/mplock2.h>
94 #include <machine/stdarg.h>
96 #include <net/if.h>
97 #include <net/if_types.h>
98 #include <net/if_var.h>
99 #include <net/if_dl.h>
100 #include <net/pfil.h>
101 #include <net/route.h>
102 #include <net/netisr2.h>
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #include <netinet/in_pcb.h>
109 #include <netinet/ip_var.h>
110 #include <netinet/ip_icmp.h>
111 #include <netinet/ip_divert.h>
112 #include <netinet/ip_flow.h>
114 #include <sys/thread2.h>
115 #include <sys/msgport2.h>
116 #include <net/netmsg2.h>
118 #include <sys/socketvar.h>
120 #include <net/ipfw/ip_fw.h>
121 #include <net/dummynet/ip_dummynet.h>
123 #ifdef IPSEC
124 #include <netinet6/ipsec.h>
125 #include <netproto/key/key.h>
126 #endif
128 #ifdef FAST_IPSEC
129 #include <netproto/ipsec/ipsec.h>
130 #include <netproto/ipsec/key.h>
131 #endif
133 int rsvp_on = 0;
134 static int ip_rsvp_on;
135 struct socket *ip_rsvpd;
137 int ipforwarding = 0;
138 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
139 &ipforwarding, 0, "Enable IP forwarding between interfaces");
141 static int ipsendredirects = 1; /* XXX */
142 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
143 &ipsendredirects, 0, "Enable sending IP redirects");
145 int ip_defttl = IPDEFTTL;
146 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
147 &ip_defttl, 0, "Maximum TTL on IP packets");
149 static int ip_dosourceroute = 0;
150 SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW,
151 &ip_dosourceroute, 0, "Enable forwarding source routed IP packets");
153 static int ip_acceptsourceroute = 0;
154 SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
155 CTLFLAG_RW, &ip_acceptsourceroute, 0,
156 "Enable accepting source routed IP packets");
158 static int maxnipq;
159 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW,
160 &maxnipq, 0,
161 "Maximum number of IPv4 fragment reassembly queue entries");
163 static int maxfragsperpacket;
164 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
165 &maxfragsperpacket, 0,
166 "Maximum number of IPv4 fragments allowed per packet");
168 static int ip_sendsourcequench = 0;
169 SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
170 &ip_sendsourcequench, 0,
171 "Enable the transmission of source quench packets");
173 int ip_do_randomid = 1;
174 SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
175 &ip_do_randomid, 0,
176 "Assign random ip_id values");
178 * XXX - Setting ip_checkinterface mostly implements the receive side of
179 * the Strong ES model described in RFC 1122, but since the routing table
180 * and transmit implementation do not implement the Strong ES model,
181 * setting this to 1 results in an odd hybrid.
183 * XXX - ip_checkinterface currently must be disabled if you use ipnat
184 * to translate the destination address to another local interface.
186 * XXX - ip_checkinterface must be disabled if you add IP aliases
187 * to the loopback interface instead of the interface where the
188 * packets for those addresses are received.
190 static int ip_checkinterface = 0;
191 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
192 &ip_checkinterface, 0, "Verify packet arrives on correct interface");
194 static u_long ip_hash_count = 0;
195 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, hash_count, CTLFLAG_RD,
196 &ip_hash_count, 0, "Number of packets hashed by IP");
198 #ifdef RSS_DEBUG
199 static u_long ip_rehash_count = 0;
200 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, rehash_count, CTLFLAG_RD,
201 &ip_rehash_count, 0, "Number of packets rehashed by IP");
203 static u_long ip_dispatch_fast = 0;
204 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_fast_count, CTLFLAG_RD,
205 &ip_dispatch_fast, 0, "Number of packets handled on current CPU");
207 static u_long ip_dispatch_slow = 0;
208 SYSCTL_ULONG(_net_inet_ip, OID_AUTO, dispatch_slow_count, CTLFLAG_RD,
209 &ip_dispatch_slow, 0, "Number of packets messaged to another CPU");
210 #endif
212 #ifdef DIAGNOSTIC
213 static int ipprintfs = 0;
214 #endif
216 extern struct domain inetdomain;
217 extern struct protosw inetsw[];
218 u_char ip_protox[IPPROTO_MAX];
219 struct in_ifaddrhead in_ifaddrheads[MAXCPU]; /* first inet address */
220 struct in_ifaddrhashhead *in_ifaddrhashtbls[MAXCPU];
221 /* inet addr hash table */
222 u_long in_ifaddrhmask; /* mask for hash table */
224 static struct mbuf *ipforward_mtemp[MAXCPU];
226 struct ip_stats ipstats_percpu[MAXCPU] __cachealign;
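/*
 * Sysctl handler backing the IPCTL_STATS node registered below: it
 * walks the netisr cpus one at a time, SYSCTL_OUT() copies that cpu's
 * ipstats_percpu[] slot out to userland and, for a write request,
 * SYSCTL_IN() copies the new values back in.
 */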
228 static int
229 sysctl_ipstats(SYSCTL_HANDLER_ARGS)
231 int cpu, error = 0;
233 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
234 if ((error = SYSCTL_OUT(req, &ipstats_percpu[cpu],
235 sizeof(struct ip_stats))))
236 break;
237 if ((error = SYSCTL_IN(req, &ipstats_percpu[cpu],
238 sizeof(struct ip_stats))))
239 break;
242 return (error);
244 SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats, (CTLTYPE_OPAQUE | CTLFLAG_RW),
245 0, 0, sysctl_ipstats, "S,ip_stats", "IP statistics");
247 /* Packet reassembly stuff */
248 #define IPREASS_NHASH_LOG2 6
249 #define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
250 #define IPREASS_HMASK (IPREASS_NHASH - 1)
251 #define IPREASS_HASH(x,y) \
252 (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
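/*
 * Example: with IPREASS_NHASH_LOG2 == 6 the mask is 0x3f, so the
 * bucket index is formed from bits 0-3 of the source address, with
 * bits 8-11 shifted into the next nibble, XORed with the IP id and
 * truncated to six bits, i.e. a bucket in the range 0..63.
 */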
254 TAILQ_HEAD(ipqhead, ipq);
255 struct ipfrag_queue {
256 int nipq;
257 volatile int draining;
258 struct netmsg_base timeo_netmsg;
259 struct callout timeo_ch;
260 struct netmsg_base drain_netmsg;
261 struct ipqhead ipq[IPREASS_NHASH];
262 } __cachealign;
264 static struct ipfrag_queue ipfrag_queue_pcpu[MAXCPU];
266 #ifdef IPCTL_DEFMTU
267 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
268 &ip_mtu, 0, "Default MTU");
269 #endif
271 #ifdef IPSTEALTH
272 static int ipstealth = 0;
273 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW, &ipstealth, 0, "");
274 #else
275 static const int ipstealth = 0;
276 #endif
278 struct mbuf *(*ip_divert_p)(struct mbuf *, int, int);
280 struct pfil_head inet_pfil_hook;
283 * struct ip_srcrt_opt is used to store packet state while it travels
284 * through the stack.
286 * XXX Note that the code even makes assumptions on the size and
287 * alignment of fields inside struct ip_srcrt so e.g. adding some
288 * fields will break the code. This needs to be fixed.
290 * We need to save the IP options in case a protocol wants to respond
291 * to an incoming packet over the same route if the packet got here
292 * using IP source routing. This allows connection establishment and
293 * maintenance when the remote end is on a network that is not known
294 * to us.
296 struct ip_srcrt {
297 struct in_addr dst; /* final destination */
298 char nop; /* one NOP to align */
299 char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */
300 struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
303 struct ip_srcrt_opt {
304 int ip_nhops;
305 struct ip_srcrt ip_srcrt;
308 #define IPFRAG_MPIPE_MAX 4096
309 #define MAXIPFRAG_MIN ((IPFRAG_MPIPE_MAX * 2) / 256)
311 #define IPFRAG_TIMEO (hz / PR_SLOWHZ)
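/*
 * With IPFRAG_MPIPE_MAX == 4096, MAXIPFRAG_MIN works out to
 * (4096 * 2) / 256 = 32, the floor applied to maxnipq in ip_init().
 * IPFRAG_TIMEO is hz / PR_SLOWHZ ticks, so the per-cpu reassembly
 * timer fires at the traditional protocol slow-timeout rate
 * (normally twice a second).
 */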
313 static MALLOC_DEFINE(M_IPQ, "ipq", "IP Fragment Management");
314 static struct malloc_pipe ipq_mpipe;
316 static void save_rte(struct mbuf *, u_char *, struct in_addr);
317 static int ip_dooptions(struct mbuf *m, int, struct sockaddr_in *);
318 static void ip_freef(struct ipfrag_queue *, struct ipqhead *,
319 struct ipq *);
320 static void ip_input_handler(netmsg_t);
322 static void ipfrag_timeo_dispatch(netmsg_t);
323 static void ipfrag_timeo(void *);
324 static void ipfrag_drain_dispatch(netmsg_t);
327 * IP initialization: fill in IP protocol switch table.
328 * All protocols not implemented in kernel go to raw IP protocol handler.
330 void
331 ip_init(void)
333 struct ipfrag_queue *fragq;
334 struct protosw *pr;
335 int cpu, i;
338 * Make sure we can handle a reasonable number of fragments but
339 * cap it at IPFRAG_MPIPE_MAX.
341 mpipe_init(&ipq_mpipe, M_IPQ, sizeof(struct ipq),
342 IFQ_MAXLEN, IPFRAG_MPIPE_MAX, 0, NULL, NULL, NULL);
345 * Make in_ifaddrhead and in_ifaddrhashtbl available on all CPUs,
346 * since they could be accessed by any threads.
348 for (cpu = 0; cpu < ncpus; ++cpu) {
349 TAILQ_INIT(&in_ifaddrheads[cpu]);
350 in_ifaddrhashtbls[cpu] =
351 hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask);
354 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
355 if (pr == NULL)
356 panic("ip_init");
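	/*
	 * Fill ip_protox[]: every IP protocol number initially maps to
	 * the raw IP protosw, then each protocol implemented in
	 * inetsw[] (other than raw IP itself) overwrites its own slot
	 * with its protosw index.  transport_processing_oncpu() uses
	 * this table to dispatch to pr_input().
	 */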
357 for (i = 0; i < IPPROTO_MAX; i++)
358 ip_protox[i] = pr - inetsw;
359 for (pr = inetdomain.dom_protosw;
360 pr < inetdomain.dom_protoswNPROTOSW; pr++) {
361 if (pr->pr_domain->dom_family == PF_INET && pr->pr_protocol) {
362 if (pr->pr_protocol != IPPROTO_RAW)
363 ip_protox[pr->pr_protocol] = pr - inetsw;
367 inet_pfil_hook.ph_type = PFIL_TYPE_AF;
368 inet_pfil_hook.ph_af = AF_INET;
369 if ((i = pfil_head_register(&inet_pfil_hook)) != 0) {
370 kprintf("%s: WARNING: unable to register pfil hook, "
371 "error %d\n", __func__, i);
374 maxnipq = (nmbclusters / 32) / netisr_ncpus;
375 if (maxnipq < MAXIPFRAG_MIN)
376 maxnipq = MAXIPFRAG_MIN;
377 maxfragsperpacket = 16;
379 ip_id = time_second & 0xffff; /* time_second survives reboots */
381 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
383 * Initialize IP statistics counters for each CPU.
385 bzero(&ipstats_percpu[cpu], sizeof(struct ip_stats));
388 * Preallocate mbuf template for forwarding
390 MGETHDR(ipforward_mtemp[cpu], M_WAITOK, MT_DATA);
393 * Initialize per-cpu ip fragments queues
395 fragq = &ipfrag_queue_pcpu[cpu];
396 for (i = 0; i < IPREASS_NHASH; i++)
397 TAILQ_INIT(&fragq->ipq[i]);
399 callout_init_mp(&fragq->timeo_ch);
400 netmsg_init(&fragq->timeo_netmsg, NULL, &netisr_adone_rport,
401 MSGF_PRIORITY, ipfrag_timeo_dispatch);
402 netmsg_init(&fragq->drain_netmsg, NULL, &netisr_adone_rport,
403 MSGF_PRIORITY, ipfrag_drain_dispatch);
406 netisr_register(NETISR_IP, ip_input_handler, ip_hashfn);
407 netisr_register_hashcheck(NETISR_IP, ip_hashcheck);
409 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
410 fragq = &ipfrag_queue_pcpu[cpu];
411 callout_reset_bycpu(&fragq->timeo_ch, IPFRAG_TIMEO,
412 ipfrag_timeo, NULL, cpu);
415 ip_porthash_trycount = 2 * netisr_ncpus;
418 /* Do transport protocol processing. */
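/*
 * ip_protox[ip->ip_p] selects the protosw entry set up in ip_init();
 * pr_input() is handed the mbuf, the header length and the protocol
 * number.  The PR_GET_MPLOCK()/PR_REL_MPLOCK() pair brackets the
 * call, which presumably serializes protocols that are not marked
 * MP-safe under the MP lock.
 */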
419 static void
420 transport_processing_oncpu(struct mbuf *m, int hlen, struct ip *ip)
422 const struct protosw *pr = &inetsw[ip_protox[ip->ip_p]];
425 * Switch out to protocol's input routine.
427 PR_GET_MPLOCK(pr);
428 pr->pr_input(&m, &hlen, ip->ip_p);
429 PR_REL_MPLOCK(pr);
432 static void
433 transport_processing_handler(netmsg_t msg)
435 struct netmsg_packet *pmsg = &msg->packet;
436 struct ip *ip;
437 int hlen;
439 ip = mtod(pmsg->nm_packet, struct ip *);
440 hlen = pmsg->base.lmsg.u.ms_result;
442 transport_processing_oncpu(pmsg->nm_packet, hlen, ip);
443 /* msg was embedded in the mbuf, do not reply! */
446 static void
447 ip_input_handler(netmsg_t msg)
449 ip_input(msg->packet.nm_packet);
450 /* msg was embedded in the mbuf, do not reply! */
454 * IP input routine. Checksum and byte swap header. If fragmented
455 * try to reassemble. Process options. Pass to next level.
457 void
458 ip_input(struct mbuf *m)
460 struct ip *ip;
461 struct in_ifaddr *ia = NULL;
462 struct in_ifaddr_container *iac;
463 int hlen, checkif;
464 u_short sum;
465 struct in_addr pkt_dst;
466 boolean_t using_srcrt = FALSE; /* forward (by PFIL_HOOKS) */
467 struct in_addr odst; /* original dst address(NAT) */
468 struct m_tag *mtag;
469 struct sockaddr_in *next_hop = NULL;
470 lwkt_port_t port;
471 #ifdef FAST_IPSEC
472 struct tdb_ident *tdbi;
473 struct secpolicy *sp;
474 int error;
475 #endif
477 ASSERT_NETISR_NCPUS(mycpuid);
478 M_ASSERTPKTHDR(m);
480 /* length checks already done in ip_hashfn() */
481 KASSERT(m->m_len >= sizeof(struct ip), ("IP header not in one mbuf"));
484 * This routine is called from numerous places which may not have
485 * characterized the packet.
487 ip = mtod(m, struct ip *);
488 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
489 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))) {
491 * Force hash recalculation for fragments and multicast
492 * packets; hardware may not do it correctly.
493 * XXX add flag to indicate the hash is from hardware
495 m->m_flags &= ~M_HASH;
497 if ((m->m_flags & M_HASH) == 0) {
498 ip_hashfn(&m, 0);
499 if (m == NULL)
500 return;
501 KKASSERT(m->m_flags & M_HASH);
503 if (&curthread->td_msgport !=
504 netisr_hashport(m->m_pkthdr.hash)) {
505 netisr_queue(NETISR_IP, m);
506 /* Requeued to other netisr msgport; done */
507 return;
510 /* mbuf could have been changed */
511 ip = mtod(m, struct ip *);
515 * Pull out certain tags
517 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
518 /* Next hop */
519 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
520 KKASSERT(mtag != NULL);
521 next_hop = m_tag_data(mtag);
524 if (m->m_pkthdr.fw_flags &
525 (DUMMYNET_MBUF_TAGGED | IPFW_MBUF_CONTINUE)) {
527 * - Dummynet already filtered this packet.
528 * - This packet was processed by ipfw on another
529 * cpu, and the rest of the ipfw processing should
530 * be carried out on this cpu.
532 ip = mtod(m, struct ip *);
533 ip->ip_len = ntohs(ip->ip_len);
534 ip->ip_off = ntohs(ip->ip_off);
535 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
536 goto iphack;
539 ipstat.ips_total++;
541 if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
542 ipstat.ips_badvers++;
543 goto bad;
546 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
547 /* length checks already done in ip_hashfn() */
548 KASSERT(hlen >= sizeof(struct ip), ("IP header len too small"));
549 KASSERT(m->m_len >= hlen, ("complete IP header not in one mbuf"));
551 /* 127/8 must not appear on wire - RFC1122 */
552 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
553 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
554 if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK)) {
555 ipstat.ips_badaddr++;
556 goto bad;
560 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
561 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
562 } else {
563 if (hlen == sizeof(struct ip))
564 sum = in_cksum_hdr(ip);
565 else
566 sum = in_cksum(m, hlen);
568 if (sum != 0) {
569 ipstat.ips_badsum++;
570 goto bad;
573 #ifdef ALTQ
574 if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0) {
575 /* packet is dropped by traffic conditioner */
576 return;
578 #endif
580 * Convert fields to host representation.
582 ip->ip_len = ntohs(ip->ip_len);
583 ip->ip_off = ntohs(ip->ip_off);
585 /* length checks already done in ip_hashfn() */
586 KASSERT(ip->ip_len >= hlen, ("total length less than header length"));
587 KASSERT(m->m_pkthdr.len >= ip->ip_len, ("mbuf too short"));
590 * Trim mbufs if longer than the IP header would have us expect.
592 if (m->m_pkthdr.len > ip->ip_len) {
593 if (m->m_len == m->m_pkthdr.len) {
594 m->m_len = ip->ip_len;
595 m->m_pkthdr.len = ip->ip_len;
596 } else {
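			/*
			 * ip_len - m_pkthdr.len is negative here, so
			 * m_adj() trims the excess bytes from the tail
			 * of the mbuf chain.
			 */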
597 m_adj(m, ip->ip_len - m->m_pkthdr.len);
600 #if defined(IPSEC) && !defined(IPSEC_FILTERGIF)
602 * Bypass packet filtering for packets from a tunnel (gif).
604 if (ipsec_gethist(m, NULL))
605 goto pass;
606 #endif
609 * IpHack's section.
610 * Right now, when no processing has been done on the packet
611 * and it is still fresh out of the network, we do our black
612 * deals with it.
613 * - Firewall: deny/allow/divert
614 * - Xlate: translate packet's addr/port (NAT).
615 * - Pipe: pass pkt through dummynet.
616 * - Wrap: fake packet's addr/port <unimpl.>
617 * - Encapsulate: put it in another IP and send out. <unimp.>
620 iphack:
622 * If we've been forwarded from the output side, then
623 * skip the firewall a second time
625 if (next_hop != NULL)
626 goto ours;
628 /* No pfil hooks */
629 if (!pfil_has_hooks(&inet_pfil_hook)) {
630 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
632 * Strip dummynet tags from stranded packets
634 mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
635 KKASSERT(mtag != NULL);
636 m_tag_delete(m, mtag);
637 m->m_pkthdr.fw_flags &= ~DUMMYNET_MBUF_TAGGED;
639 goto pass;
643 * Run through list of hooks for input packets.
645 * NOTE! If the packet is rewritten pf/ipfw/whoever must
646 * clear M_HASH.
648 odst = ip->ip_dst;
649 if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif, PFIL_IN))
650 return;
651 if (m == NULL) /* consumed by filter */
652 return;
653 ip = mtod(m, struct ip *);
654 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
655 using_srcrt = (odst.s_addr != ip->ip_dst.s_addr);
657 if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED) {
658 mtag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
659 KKASSERT(mtag != NULL);
660 next_hop = m_tag_data(mtag);
662 if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED) {
663 ip_dn_queue(m);
664 return;
666 if (m->m_pkthdr.fw_flags & FW_MBUF_REDISPATCH)
667 m->m_pkthdr.fw_flags &= ~FW_MBUF_REDISPATCH;
668 if (m->m_pkthdr.fw_flags & IPFW_MBUF_CONTINUE) {
669 /* ipfw was disabled/unloaded. */
670 goto bad;
672 pass:
674 * Process options and, if not destined for us,
675 * ship it on. ip_dooptions returns 1 when an
676 * error was detected (causing an icmp message
677 * to be sent and the original packet to be freed).
679 if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, next_hop))
680 return;
682 /* Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
683 * matter whether it is destined for another node or is a
684 * multicast packet; RSVP wants it and prevents it from being
685 * forwarded anywhere else. Also check that the RSVP daemon is
686 * running before grabbing the packet.
688 if (rsvp_on && ip->ip_p == IPPROTO_RSVP)
689 goto ours;
692 * Check our list of addresses, to see if the packet is for us.
693 * If we don't have any addresses, assume any unicast packet
694 * we receive might be for us (and let the upper layers deal
695 * with it).
697 if (TAILQ_EMPTY(&in_ifaddrheads[mycpuid]) &&
698 !(m->m_flags & (M_MCAST | M_BCAST)))
699 goto ours;
702 * Cache the destination address of the packet; this may be
703 * changed by use of 'ipfw fwd'.
705 pkt_dst = next_hop ? next_hop->sin_addr : ip->ip_dst;
708 * Enable a consistency check between the destination address
709 * and the arrival interface for a unicast packet (the RFC 1122
710 * strong ES model) if IP forwarding is disabled and the packet
711 * is not locally generated and the packet is not subject to
712 * 'ipfw fwd'.
714 * XXX - Checking also should be disabled if the destination
715 * address is ipnat'ed to a different interface.
717 * XXX - Checking is incompatible with IP aliases added
718 * to the loopback interface instead of the interface where
719 * the packets are received.
721 checkif = ip_checkinterface &&
722 !ipforwarding &&
723 m->m_pkthdr.rcvif != NULL &&
724 !(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
725 next_hop == NULL;
728 * Check for exact addresses in the hash bucket.
730 LIST_FOREACH(iac, INADDR_HASH(pkt_dst.s_addr), ia_hash) {
731 ia = iac->ia;
734 * If the address matches, verify that the packet
735 * arrived via the correct interface if checking is
736 * enabled.
738 if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr &&
739 (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
740 goto ours;
742 ia = NULL;
745 * Check for broadcast addresses.
747 * Only accept broadcast packets that arrive via the matching
748 * interface. Reception of forwarded directed broadcasts would
749 * be handled via ip_forward() and ether_output() with the loopback
750 * into the stack for SIMPLEX interfaces handled by ether_output().
752 if (m->m_pkthdr.rcvif != NULL &&
753 m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
754 struct ifaddr_container *ifac;
756 TAILQ_FOREACH(ifac, &m->m_pkthdr.rcvif->if_addrheads[mycpuid],
757 ifa_link) {
758 struct ifaddr *ifa = ifac->ifa;
760 if (ifa->ifa_addr == NULL) /* shutdown/startup race */
761 continue;
762 if (ifa->ifa_addr->sa_family != AF_INET)
763 continue;
764 ia = ifatoia(ifa);
765 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
766 pkt_dst.s_addr)
767 goto ours;
768 if (ia->ia_netbroadcast.s_addr == pkt_dst.s_addr)
769 goto ours;
770 #ifdef BOOTP_COMPAT
771 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
772 goto ours;
773 #endif
776 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
777 struct in_multi *inm;
779 if (ip_mrouter != NULL) {
780 /* XXX Multicast routing is not MPSAFE yet */
781 get_mplock();
784 * If we are acting as a multicast router, all
785 * incoming multicast packets are passed to the
786 * kernel-level multicast forwarding function.
787 * The packet is returned (relatively) intact; if
788 * ip_mforward() returns a non-zero value, the packet
789 * must be discarded, else it may be accepted below.
791 if (ip_mforward != NULL &&
792 ip_mforward(ip, m->m_pkthdr.rcvif, m, NULL) != 0) {
793 rel_mplock();
794 ipstat.ips_cantforward++;
795 m_freem(m);
796 return;
799 rel_mplock();
802 * The process-level routing daemon needs to receive
803 * all multicast IGMP packets, whether or not this
804 * host belongs to their destination groups.
806 if (ip->ip_p == IPPROTO_IGMP)
807 goto ours;
808 ipstat.ips_forward++;
811 * See if we belong to the destination multicast group on the
812 * arrival interface.
814 inm = IN_LOOKUP_MULTI(&ip->ip_dst, m->m_pkthdr.rcvif);
815 if (inm == NULL) {
816 ipstat.ips_notmember++;
817 m_freem(m);
818 return;
820 goto ours;
822 if (ip->ip_dst.s_addr == INADDR_BROADCAST)
823 goto ours;
824 if (ip->ip_dst.s_addr == INADDR_ANY)
825 goto ours;
828 * Not for us; forward if possible and desirable.
830 if (!ipforwarding) {
831 ipstat.ips_cantforward++;
832 m_freem(m);
833 } else {
834 #ifdef IPSEC
836 * Enforce inbound IPsec SPD.
838 if (ipsec4_in_reject(m, NULL)) {
839 ipsecstat.in_polvio++;
840 goto bad;
842 #endif
843 #ifdef FAST_IPSEC
844 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
845 crit_enter();
846 if (mtag != NULL) {
847 tdbi = (struct tdb_ident *)m_tag_data(mtag);
848 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
849 } else {
850 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
851 IP_FORWARDING, &error);
853 if (sp == NULL) { /* NB: can happen if error */
854 crit_exit();
855 /*XXX error stat???*/
856 DPRINTF(("ip_input: no SP for forwarding\n")); /*XXX*/
857 goto bad;
861 * Check security policy against packet attributes.
863 error = ipsec_in_reject(sp, m);
864 KEY_FREESP(&sp);
865 crit_exit();
866 if (error) {
867 ipstat.ips_cantforward++;
868 goto bad;
870 #endif
871 ip_forward(m, using_srcrt, next_hop);
873 return;
875 ours:
878 * IPSTEALTH: Process non-routing options only
879 * if the packet is destined for us.
881 if (ipstealth &&
882 hlen > sizeof(struct ip) &&
883 ip_dooptions(m, 1, next_hop))
884 return;
886 /* Count the packet in the ip address stats */
887 if (ia != NULL) {
888 IFA_STAT_INC(&ia->ia_ifa, ipackets, 1);
889 IFA_STAT_INC(&ia->ia_ifa, ibytes, m->m_pkthdr.len);
893 * If offset or IP_MF are set, must reassemble.
894 * Otherwise, nothing need be done.
895 * (We could look in the reassembly queue to see
896 * if the packet was previously fragmented,
897 * but it's not worth the time; just let them time out.)
899 if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
901 * Attempt reassembly; if it succeeds, proceed. ip_reass()
902 * will return a different mbuf.
904 * NOTE: ip_reass() returns m with M_HASH cleared to force
905 * us to recharacterize the packet.
907 m = ip_reass(m);
908 if (m == NULL)
909 return;
910 ip = mtod(m, struct ip *);
912 /* Get the header length of the reassembled packet */
913 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
914 } else {
915 ip->ip_len -= hlen;
918 #ifdef IPSEC
920 * enforce IPsec policy checking if we are seeing last header.
921 * note that we do not visit this with protocols with pcb layer
922 * code - like udp/tcp/raw ip.
924 if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) &&
925 ipsec4_in_reject(m, NULL)) {
926 ipsecstat.in_polvio++;
927 goto bad;
929 #endif
930 #ifdef FAST_IPSEC
932 * enforce IPsec policy checking if we are seeing last header.
933 * note that we do not visit this with protocols with pcb layer
934 * code - like udp/tcp/raw ip.
936 if (inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) {
938 * Check if the packet has already had IPsec processing
939 * done. If so, then just pass it along. This tag gets
940 * set during AH, ESP, etc. input handling, before the
941 * packet is returned to the ip input queue for delivery.
943 mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL);
944 crit_enter();
945 if (mtag != NULL) {
946 tdbi = (struct tdb_ident *)m_tag_data(mtag);
947 sp = ipsec_getpolicy(tdbi, IPSEC_DIR_INBOUND);
948 } else {
949 sp = ipsec_getpolicybyaddr(m, IPSEC_DIR_INBOUND,
950 IP_FORWARDING, &error);
952 if (sp != NULL) {
954 * Check security policy against packet attributes.
956 error = ipsec_in_reject(sp, m);
957 KEY_FREESP(&sp);
958 } else {
959 /* XXX error stat??? */
960 error = EINVAL;
961 DPRINTF(("ip_input: no SP, packet discarded\n"));/*XXX*/
962 crit_exit();
963 goto bad;
965 crit_exit();
966 if (error)
967 goto bad;
969 #endif /* FAST_IPSEC */
972 * We must forward the packet to the correct protocol thread if
973 * we are not already in it.
975 * NOTE: ip_len is now in host form. ip_len is not adjusted
976 * further for protocol processing, instead we pass hlen
977 * to the protosw and let it deal with it.
979 ipstat.ips_delivered++;
981 if ((m->m_flags & M_HASH) == 0) {
982 #ifdef RSS_DEBUG
983 atomic_add_long(&ip_rehash_count, 1);
984 #endif
985 ip->ip_len = htons(ip->ip_len + hlen);
986 ip->ip_off = htons(ip->ip_off);
988 ip_hashfn(&m, 0);
989 if (m == NULL)
990 return;
992 ip = mtod(m, struct ip *);
993 ip->ip_len = ntohs(ip->ip_len) - hlen;
994 ip->ip_off = ntohs(ip->ip_off);
995 KKASSERT(m->m_flags & M_HASH);
997 port = netisr_hashport(m->m_pkthdr.hash);
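	/*
	 * If the hash does not map to the current netisr thread, wrap
	 * the mbuf in the netmsg embedded in its packet header and send
	 * it to the owning protocol thread; otherwise handle it inline.
	 * Either way the message is embedded in the mbuf and must not
	 * be replied to.
	 */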
999 if (port != &curthread->td_msgport) {
1000 struct netmsg_packet *pmsg;
1002 #ifdef RSS_DEBUG
1003 atomic_add_long(&ip_dispatch_slow, 1);
1004 #endif
1006 pmsg = &m->m_hdr.mh_netmsg;
1007 netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
1008 0, transport_processing_handler);
1009 pmsg->nm_packet = m;
1010 pmsg->base.lmsg.u.ms_result = hlen;
1011 lwkt_sendmsg(port, &pmsg->base.lmsg);
1012 } else {
1013 #ifdef RSS_DEBUG
1014 atomic_add_long(&ip_dispatch_fast, 1);
1015 #endif
1016 transport_processing_oncpu(m, hlen, ip);
1018 return;
1020 bad:
1021 m_freem(m);
1025 * Take incoming datagram fragment and try to reassemble it into
1026 * whole datagram. If a chain for reassembly of this datagram already
1027 * exists, then it is given as fp; otherwise have to make a chain.
1029 struct mbuf *
1030 ip_reass(struct mbuf *m)
1032 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
1033 struct ip *ip = mtod(m, struct ip *);
1034 struct mbuf *p = NULL, *q, *nq;
1035 struct mbuf *n;
1036 struct ipq *fp = NULL;
1037 struct ipqhead *head;
1038 int hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1039 int i, next;
1040 u_short sum;
1042 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
1043 if (maxnipq == 0 || maxfragsperpacket == 0) {
1044 ipstat.ips_fragments++;
1045 ipstat.ips_fragdropped++;
1046 m_freem(m);
1047 return NULL;
1050 sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
1052 * Look for queue of fragments of this datagram.
1054 head = &fragq->ipq[sum];
1055 TAILQ_FOREACH(fp, head, ipq_list) {
1056 if (ip->ip_id == fp->ipq_id &&
1057 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
1058 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
1059 ip->ip_p == fp->ipq_p)
1060 goto found;
1063 fp = NULL;
1066 * Enforce upper bound on number of fragmented packets
1067 * for which we attempt reassembly;
1068 * If maxnipq is -1, accept all fragments without limitation.
1070 if (fragq->nipq > maxnipq && maxnipq > 0) {
1072 * drop something from the tail of the current queue
1073 * before proceeding further
1075 struct ipq *q = TAILQ_LAST(head, ipqhead);
1076 if (q == NULL) {
1078 * The current queue is empty,
1079 * so drop from one of the others.
1081 for (i = 0; i < IPREASS_NHASH; i++) {
1082 struct ipq *r = TAILQ_LAST(&fragq->ipq[i],
1083 ipqhead);
1084 if (r) {
1085 ipstat.ips_fragtimeout += r->ipq_nfrags;
1086 ip_freef(fragq, &fragq->ipq[i], r);
1087 break;
1090 } else {
1091 ipstat.ips_fragtimeout += q->ipq_nfrags;
1092 ip_freef(fragq, head, q);
1095 found:
1097 * Adjust ip_len to not reflect header,
1098 * convert offset of this to bytes.
1100 ip->ip_len -= hlen;
1101 if (ip->ip_off & IP_MF) {
1103 * Make sure that fragments have a data length
1104 * that's a non-zero multiple of 8 bytes.
1106 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
1107 ipstat.ips_toosmall++; /* XXX */
1108 m_freem(m);
1109 goto done;
1111 m->m_flags |= M_FRAG;
1112 } else {
1113 m->m_flags &= ~M_FRAG;
1115 ip->ip_off <<= 3;
1117 ipstat.ips_fragments++;
1118 m->m_pkthdr.header = ip;
1121 * If the hardware has not done csum over this fragment
1122 * then csum_data is not valid at all.
1124 if ((m->m_pkthdr.csum_flags & (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID))
1125 == (CSUM_FRAG_NOT_CHECKED | CSUM_DATA_VALID)) {
1126 m->m_pkthdr.csum_data = 0;
1127 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1131 * Presence of header sizes in mbufs
1132 * would confuse code below.
1134 m->m_data += hlen;
1135 m->m_len -= hlen;
1138 * If first fragment to arrive, create a reassembly queue.
1140 if (fp == NULL) {
1141 if ((fp = mpipe_alloc_nowait(&ipq_mpipe)) == NULL)
1142 goto dropfrag;
1143 TAILQ_INSERT_HEAD(head, fp, ipq_list);
1144 fragq->nipq++;
1145 fp->ipq_nfrags = 1;
1146 fp->ipq_ttl = IPFRAGTTL;
1147 fp->ipq_p = ip->ip_p;
1148 fp->ipq_id = ip->ip_id;
1149 fp->ipq_src = ip->ip_src;
1150 fp->ipq_dst = ip->ip_dst;
1151 fp->ipq_frags = m;
1152 m->m_nextpkt = NULL;
1153 goto inserted;
1155 fp->ipq_nfrags++;
1157 #define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
1160 * Find a segment which begins after this one does.
1162 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
1163 if (GETIP(q)->ip_off > ip->ip_off)
1164 break;
1168 * If there is a preceding segment, it may provide some of
1169 * our data already. If so, drop the data from the incoming
1170 * segment. If it provides all of our data, drop us, otherwise
1171 * stick new segment in the proper place.
1173 * If some of the data is dropped from the preceding
1174 * segment, then its checksum is invalidated.
1176 if (p) {
1177 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
1178 if (i > 0) {
1179 if (i >= ip->ip_len)
1180 goto dropfrag;
1181 m_adj(m, i);
1182 m->m_pkthdr.csum_flags = 0;
1183 ip->ip_off += i;
1184 ip->ip_len -= i;
1186 m->m_nextpkt = p->m_nextpkt;
1187 p->m_nextpkt = m;
1188 } else {
1189 m->m_nextpkt = fp->ipq_frags;
1190 fp->ipq_frags = m;
1194 * While we overlap succeeding segments trim them or,
1195 * if they are completely covered, dequeue them.
1197 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
1198 q = nq) {
1199 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
1200 if (i < GETIP(q)->ip_len) {
1201 GETIP(q)->ip_len -= i;
1202 GETIP(q)->ip_off += i;
1203 m_adj(q, i);
1204 q->m_pkthdr.csum_flags = 0;
1205 break;
1207 nq = q->m_nextpkt;
1208 m->m_nextpkt = nq;
1209 ipstat.ips_fragdropped++;
1210 fp->ipq_nfrags--;
1211 q->m_nextpkt = NULL;
1212 m_freem(q);
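	/*
	 * Example: a new fragment covering byte offsets 0-63 arriving
	 * while a queued fragment q starts at offset 32 gives
	 * i = 64 - 32 = 32; if q is longer than 32 bytes its first 32
	 * bytes are trimmed (offset and length adjusted, hardware
	 * checksum invalidated), otherwise q is completely covered and
	 * is unlinked and freed.
	 */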
1215 inserted:
1217 * Check for complete reassembly and perform frag per packet
1218 * limiting.
1220 * Frag limiting is performed here so that the nth frag has
1221 * a chance to complete the packet before we drop the packet.
1222 * As a result, n+1 frags are actually allowed per packet, but
1223 * only n will ever be stored. (n = maxfragsperpacket.)
1226 next = 0;
1227 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
1228 if (GETIP(q)->ip_off != next) {
1229 if (fp->ipq_nfrags > maxfragsperpacket) {
1230 ipstat.ips_fragdropped += fp->ipq_nfrags;
1231 ip_freef(fragq, head, fp);
1233 goto done;
1235 next += GETIP(q)->ip_len;
1237 /* Make sure the last packet didn't have the IP_MF flag */
1238 if (p->m_flags & M_FRAG) {
1239 if (fp->ipq_nfrags > maxfragsperpacket) {
1240 ipstat.ips_fragdropped += fp->ipq_nfrags;
1241 ip_freef(fragq, head, fp);
1243 goto done;
1247 * Reassembly is complete. Make sure the packet is a sane size.
1249 q = fp->ipq_frags;
1250 ip = GETIP(q);
1251 if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
1252 ipstat.ips_toolong++;
1253 ipstat.ips_fragdropped += fp->ipq_nfrags;
1254 ip_freef(fragq, head, fp);
1255 goto done;
1259 * Concatenate fragments.
1261 m = q;
1262 n = m->m_next;
1263 m->m_next = NULL;
1264 m_cat(m, n);
1265 nq = q->m_nextpkt;
1266 q->m_nextpkt = NULL;
1267 for (q = nq; q != NULL; q = nq) {
1268 nq = q->m_nextpkt;
1269 q->m_nextpkt = NULL;
1270 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
1271 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
1272 m_cat(m, q);
1276 * Clean up the 1's complement checksum. Carry over 16 bits must
1277 * be added back. This assumes no more than 65535 packet fragments
1278 * were reassembled. A second carry can also occur (but not a third).
1280 m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
1281 (m->m_pkthdr.csum_data >> 16);
1282 if (m->m_pkthdr.csum_data > 0xFFFF)
1283 m->m_pkthdr.csum_data -= 0xFFFF;
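	/*
	 * Example: an accumulated value of 0x0002fffd folds to
	 * 0xfffd + 0x2 = 0xffff; 0x0003fffe folds to 0xfffe + 0x3 =
	 * 0x10001, and the conditional subtract of 0xffff above handles
	 * that second carry, leaving 0x2.
	 */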
1286 * Create header for new ip packet by
1287 * modifying header of first packet;
1288 * dequeue and discard fragment reassembly header.
1289 * Make header visible.
1291 ip->ip_len = next;
1292 ip->ip_src = fp->ipq_src;
1293 ip->ip_dst = fp->ipq_dst;
1294 TAILQ_REMOVE(head, fp, ipq_list);
1295 fragq->nipq--;
1296 mpipe_free(&ipq_mpipe, fp);
1297 m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
1298 m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
1299 /* some debugging cruft by sklower, below, will go away soon */
1300 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
1301 int plen = 0;
1303 for (n = m; n; n = n->m_next)
1304 plen += n->m_len;
1305 m->m_pkthdr.len = plen;
1309 * Reassembly complete, return the next protocol.
1311 * Be sure to clear M_HASH to force the packet
1312 * to be re-characterized.
1314 * Clear M_FRAG, we are no longer a fragment.
1316 m->m_flags &= ~(M_HASH | M_FRAG);
1318 ipstat.ips_reassembled++;
1319 return (m);
1321 dropfrag:
1322 ipstat.ips_fragdropped++;
1323 if (fp != NULL)
1324 fp->ipq_nfrags--;
1325 m_freem(m);
1326 done:
1327 return (NULL);
1329 #undef GETIP
1333 * Free a fragment reassembly header and all
1334 * associated datagrams.
1336 static void
1337 ip_freef(struct ipfrag_queue *fragq, struct ipqhead *fhp, struct ipq *fp)
1339 struct mbuf *q;
1342 * Remove first to protect against blocking
1344 TAILQ_REMOVE(fhp, fp, ipq_list);
1347 * Clean out at our leisure
1349 while (fp->ipq_frags) {
1350 q = fp->ipq_frags;
1351 fp->ipq_frags = q->m_nextpkt;
1352 q->m_nextpkt = NULL;
1353 m_freem(q);
1355 mpipe_free(&ipq_mpipe, fp);
1356 fragq->nipq--;
1360 * If a timer expires on a reassembly queue, discard it.
1362 static void
1363 ipfrag_timeo_dispatch(netmsg_t nmsg)
1365 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
1366 struct ipq *fp, *fp_temp;
1367 struct ipqhead *head;
1368 int i;
1370 crit_enter();
1371 netisr_replymsg(&nmsg->base, 0); /* reply ASAP */
1372 crit_exit();
1374 if (fragq->nipq == 0)
1375 goto done;
1377 for (i = 0; i < IPREASS_NHASH; i++) {
1378 head = &fragq->ipq[i];
1379 TAILQ_FOREACH_MUTABLE(fp, head, ipq_list, fp_temp) {
1380 if (--fp->ipq_ttl == 0) {
1381 ipstat.ips_fragtimeout += fp->ipq_nfrags;
1382 ip_freef(fragq, head, fp);
1387 * If we are over the maximum number of fragments
1388 * (due to the limit being lowered), drain off
1389 * enough to get down to the new limit.
1391 if (maxnipq >= 0 && fragq->nipq > maxnipq) {
1392 for (i = 0; i < IPREASS_NHASH; i++) {
1393 head = &fragq->ipq[i];
1394 while (fragq->nipq > maxnipq && !TAILQ_EMPTY(head)) {
1395 ipstat.ips_fragdropped +=
1396 TAILQ_FIRST(head)->ipq_nfrags;
1397 ip_freef(fragq, head, TAILQ_FIRST(head));
1401 done:
1402 callout_reset(&fragq->timeo_ch, IPFRAG_TIMEO, ipfrag_timeo, NULL);
1405 static void
1406 ipfrag_timeo(void *dummy __unused)
1408 struct netmsg_base *msg = &ipfrag_queue_pcpu[mycpuid].timeo_netmsg;
1410 crit_enter();
1411 if (msg->lmsg.ms_flags & MSGF_DONE)
1412 netisr_sendmsg_oncpu(msg);
1413 crit_exit();
1417 * Drain off all datagram fragments.
1419 static void
1420 ipfrag_drain_oncpu(struct ipfrag_queue *fragq)
1422 struct ipqhead *head;
1423 int i;
1425 for (i = 0; i < IPREASS_NHASH; i++) {
1426 head = &fragq->ipq[i];
1427 while (!TAILQ_EMPTY(head)) {
1428 ipstat.ips_fragdropped += TAILQ_FIRST(head)->ipq_nfrags;
1429 ip_freef(fragq, head, TAILQ_FIRST(head));
1434 static void
1435 ipfrag_drain_dispatch(netmsg_t nmsg)
1437 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[mycpuid];
1439 crit_enter();
1440 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */
1441 crit_exit();
1443 ipfrag_drain_oncpu(fragq);
1444 fragq->draining = 0;
1447 static void
1448 ipfrag_drain_ipi(void *arg __unused)
1450 int cpu = mycpuid;
1451 struct lwkt_msg *msg = &ipfrag_queue_pcpu[cpu].drain_netmsg.lmsg;
1453 crit_enter();
1454 if (msg->ms_flags & MSGF_DONE)
1455 lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
1456 crit_exit();
1459 static void
1460 ipfrag_drain(void)
1462 cpumask_t mask;
1463 int cpu;
1465 CPUMASK_ASSBMASK(mask, netisr_ncpus);
1466 CPUMASK_ANDMASK(mask, smp_active_mask);
1468 if (IN_NETISR_NCPUS(mycpuid)) {
1469 ipfrag_drain_oncpu(&ipfrag_queue_pcpu[mycpuid]);
1470 CPUMASK_NANDBIT(mask, mycpuid);
1473 for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
1474 struct ipfrag_queue *fragq = &ipfrag_queue_pcpu[cpu];
1476 if (!CPUMASK_TESTBIT(mask, cpu))
1477 continue;
1479 if (fragq->nipq == 0 || fragq->draining) {
1480 /* No fragments or is draining; skip this cpu. */
1481 CPUMASK_NANDBIT(mask, cpu);
1482 continue;
1484 fragq->draining = 1;
1487 if (CPUMASK_TESTNZERO(mask))
1488 lwkt_send_ipiq_mask(mask, ipfrag_drain_ipi, NULL);
1491 void
1492 ip_drain(void)
1494 ipfrag_drain();
1495 in_rtqdrain();
1499 * Do option processing on a datagram,
1500 * possibly discarding it if bad options are encountered,
1501 * or forwarding it if source-routed.
1502 * The pass argument is used when operating in the IPSTEALTH
1503 * mode to tell what options to process:
1504 * [LS]SRR (pass 0) or the others (pass 1).
1505 * The reason for as many as two passes is that when doing IPSTEALTH,
1506 * non-routing options should be processed only if the packet is for us.
1507 * Returns 1 if packet has been forwarded/freed,
1508 * 0 if the packet should be processed further.
1510 static int
1511 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
1513 struct sockaddr_in ipaddr = { sizeof ipaddr, AF_INET };
1514 struct ip *ip = mtod(m, struct ip *);
1515 u_char *cp;
1516 struct in_ifaddr *ia;
1517 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB;
1518 boolean_t forward = FALSE;
1519 struct in_addr *sin, dst;
1520 n_time ntime;
1522 dst = ip->ip_dst;
1523 cp = (u_char *)(ip + 1);
1524 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
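	/*
	 * Each option other than EOL/NOP is laid out as
	 * cp[IPOPT_OPTVAL] (the option type), cp[IPOPT_OLEN] (the total
	 * option length, including the type and length bytes) and, for
	 * the routing and timestamp options, cp[IPOPT_OFFSET], a
	 * pointer into the option data (1-based; the routing options
	 * require at least IPOPT_MINOFF).
	 */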
1525 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1526 opt = cp[IPOPT_OPTVAL];
1527 if (opt == IPOPT_EOL)
1528 break;
1529 if (opt == IPOPT_NOP)
1530 optlen = 1;
1531 else {
1532 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
1533 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1534 goto bad;
1536 optlen = cp[IPOPT_OLEN];
1537 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
1538 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1539 goto bad;
1542 switch (opt) {
1544 default:
1545 break;
1548 * Source routing with record.
1549 * Find interface with current destination address.
1550 * If none on this machine then drop if strictly routed,
1551 * or do nothing if loosely routed.
1552 * Record interface address and bring up next address
1553 * component. If strictly routed make sure next
1554 * address is on directly accessible net.
1556 case IPOPT_LSRR:
1557 case IPOPT_SSRR:
1558 if (ipstealth && pass > 0)
1559 break;
1560 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1561 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1562 goto bad;
1564 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1565 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1566 goto bad;
1568 ipaddr.sin_addr = ip->ip_dst;
1569 ia = (struct in_ifaddr *)
1570 ifa_ifwithaddr((struct sockaddr *)&ipaddr);
1571 if (ia == NULL) {
1572 if (opt == IPOPT_SSRR) {
1573 type = ICMP_UNREACH;
1574 code = ICMP_UNREACH_SRCFAIL;
1575 goto bad;
1577 if (!ip_dosourceroute)
1578 goto nosourcerouting;
1580 * Loose routing, and not at next destination
1581 * yet; nothing to do except forward.
1583 break;
1585 off--; /* 0 origin */
1586 if (off > optlen - (int)sizeof(struct in_addr)) {
1588 * End of source route. Should be for us.
1590 if (!ip_acceptsourceroute)
1591 goto nosourcerouting;
1592 save_rte(m, cp, ip->ip_src);
1593 break;
1595 if (ipstealth)
1596 goto dropit;
1597 if (!ip_dosourceroute) {
1598 if (ipforwarding) {
1599 char sbuf[INET_ADDRSTRLEN];
1600 char dbuf[INET_ADDRSTRLEN];
1603 * Acting as a router, so generate ICMP
1605 nosourcerouting:
1606 log(LOG_WARNING,
1607 "attempted source route from %s to %s\n",
1608 kinet_ntoa(ip->ip_src, sbuf),
1609 kinet_ntoa(ip->ip_dst, dbuf));
1610 type = ICMP_UNREACH;
1611 code = ICMP_UNREACH_SRCFAIL;
1612 goto bad;
1613 } else {
1615 * Not acting as a router,
1616 * so silently drop.
1618 dropit:
1619 ipstat.ips_cantforward++;
1620 m_freem(m);
1621 return (1);
1626 * locate outgoing interface
1628 memcpy(&ipaddr.sin_addr, cp + off,
1629 sizeof ipaddr.sin_addr);
1631 if (opt == IPOPT_SSRR) {
1632 #define INA struct in_ifaddr *
1633 #define SA struct sockaddr *
1634 if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr))
1635 == NULL)
1636 ia = (INA)ifa_ifwithnet((SA)&ipaddr);
1637 } else {
1638 ia = ip_rtaddr(ipaddr.sin_addr, NULL);
1640 if (ia == NULL) {
1641 type = ICMP_UNREACH;
1642 code = ICMP_UNREACH_SRCFAIL;
1643 goto bad;
1645 ip->ip_dst = ipaddr.sin_addr;
1646 memcpy(cp + off, &IA_SIN(ia)->sin_addr,
1647 sizeof(struct in_addr));
1648 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1650 * Let ip_intr's mcast routing check handle mcast pkts
1652 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
1653 break;
1655 case IPOPT_RR:
1656 if (ipstealth && pass == 0)
1657 break;
1658 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
1659 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1660 goto bad;
1662 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
1663 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1664 goto bad;
1667 * If no space remains, ignore.
1669 off--; /* 0 origin */
1670 if (off > optlen - (int)sizeof(struct in_addr))
1671 break;
1672 memcpy(&ipaddr.sin_addr, &ip->ip_dst,
1673 sizeof ipaddr.sin_addr);
1675 * locate outgoing interface; if we're the destination,
1676 * use the incoming interface (should be same).
1678 if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == NULL &&
1679 (ia = ip_rtaddr(ipaddr.sin_addr, NULL)) == NULL) {
1680 type = ICMP_UNREACH;
1681 code = ICMP_UNREACH_HOST;
1682 goto bad;
1684 memcpy(cp + off, &IA_SIN(ia)->sin_addr,
1685 sizeof(struct in_addr));
1686 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1687 break;
1689 case IPOPT_TS:
1690 if (ipstealth && pass == 0)
1691 break;
1692 code = cp - (u_char *)ip;
1693 if (optlen < 4 || optlen > 40) {
1694 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1695 goto bad;
1697 if ((off = cp[IPOPT_OFFSET]) < 5) {
1698 code = &cp[IPOPT_OLEN] - (u_char *)ip;
1699 goto bad;
1701 if (off > optlen - (int)sizeof(int32_t)) {
1702 cp[IPOPT_OFFSET + 1] += (1 << 4);
1703 if ((cp[IPOPT_OFFSET + 1] & 0xf0) == 0) {
1704 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1705 goto bad;
1707 break;
1709 off--; /* 0 origin */
1710 sin = (struct in_addr *)(cp + off);
1711 switch (cp[IPOPT_OFFSET + 1] & 0x0f) {
1713 case IPOPT_TS_TSONLY:
1714 break;
1716 case IPOPT_TS_TSANDADDR:
1717 if (off + sizeof(n_time) +
1718 sizeof(struct in_addr) > optlen) {
1719 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1720 goto bad;
1722 ipaddr.sin_addr = dst;
1723 ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
1724 m->m_pkthdr.rcvif);
1725 if (ia == NULL)
1726 continue;
1727 memcpy(sin, &IA_SIN(ia)->sin_addr,
1728 sizeof(struct in_addr));
1729 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1730 off += sizeof(struct in_addr);
1731 break;
1733 case IPOPT_TS_PRESPEC:
1734 if (off + sizeof(n_time) +
1735 sizeof(struct in_addr) > optlen) {
1736 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
1737 goto bad;
1739 memcpy(&ipaddr.sin_addr, sin,
1740 sizeof(struct in_addr));
1741 if (ifa_ifwithaddr((SA)&ipaddr) == NULL)
1742 continue;
1743 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
1744 off += sizeof(struct in_addr);
1745 break;
1747 default:
1748 code = &cp[IPOPT_OFFSET + 1] - (u_char *)ip;
1749 goto bad;
1751 ntime = iptime();
1752 memcpy(cp + off, &ntime, sizeof(n_time));
1753 cp[IPOPT_OFFSET] += sizeof(n_time);
1756 if (forward && ipforwarding) {
1757 ip_forward(m, TRUE, next_hop);
1758 return (1);
1760 return (0);
1761 bad:
1762 icmp_error(m, type, code, 0, 0);
1763 ipstat.ips_badoptions++;
1764 return (1);
1768 * Given address of next destination (final or next hop),
1769 * return internet address info of interface to be used to get there.
1771 struct in_ifaddr *
1772 ip_rtaddr(struct in_addr dst, struct route *ro0)
1774 struct route sro, *ro;
1775 struct sockaddr_in *sin;
1776 struct in_ifaddr *ia;
1778 if (ro0 != NULL) {
1779 ro = ro0;
1780 } else {
1781 bzero(&sro, sizeof(sro));
1782 ro = &sro;
1785 sin = (struct sockaddr_in *)&ro->ro_dst;
1787 if (ro->ro_rt == NULL || dst.s_addr != sin->sin_addr.s_addr) {
1788 if (ro->ro_rt != NULL) {
1789 RTFREE(ro->ro_rt);
1790 ro->ro_rt = NULL;
1792 sin->sin_family = AF_INET;
1793 sin->sin_len = sizeof *sin;
1794 sin->sin_addr = dst;
1795 rtalloc_ign(ro, RTF_PRCLONING);
1798 if (ro->ro_rt == NULL)
1799 return (NULL);
1801 ia = ifatoia(ro->ro_rt->rt_ifa);
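	/*
	 * If we allocated the throw-away route on our stack, release
	 * the rtentry now; a caller-supplied route (e.g. fwd_ro in
	 * ip_forward()) keeps the cached rtentry for later use.
	 */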
1803 if (ro == &sro)
1804 RTFREE(ro->ro_rt);
1805 return ia;
1809 * Save incoming source route for use in replies,
1810 * to be picked up later by ip_srcroute if the receiver is interested.
1812 static void
1813 save_rte(struct mbuf *m, u_char *option, struct in_addr dst)
1815 struct m_tag *mtag;
1816 struct ip_srcrt_opt *opt;
1817 unsigned olen;
1819 mtag = m_tag_get(PACKET_TAG_IPSRCRT, sizeof(*opt), M_NOWAIT);
1820 if (mtag == NULL)
1821 return;
1822 opt = m_tag_data(mtag);
1824 olen = option[IPOPT_OLEN];
1825 #ifdef DIAGNOSTIC
1826 if (ipprintfs)
1827 kprintf("save_rte: olen %d\n", olen);
1828 #endif
1829 if (olen > sizeof(opt->ip_srcrt) - (1 + sizeof(dst))) {
1830 m_tag_free(mtag);
1831 return;
1833 bcopy(option, opt->ip_srcrt.srcopt, olen);
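	/*
	 * olen counts the option type, length and pointer bytes plus
	 * the addresses; e.g. three recorded hops give
	 * olen = 3 + 3 * 4 = 15 and thus
	 * ip_nhops = (15 - IPOPT_OFFSET - 1) / 4 = 3 below.
	 */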
1834 opt->ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
1835 opt->ip_srcrt.dst = dst;
1836 m_tag_prepend(m, mtag);
1840 * Retrieve incoming source route for use in replies,
1841 * in the same form used by setsockopt.
1842 * The first hop is placed before the options and will be removed later.
1844 struct mbuf *
1845 ip_srcroute(struct mbuf *m0)
1847 struct in_addr *p, *q;
1848 struct mbuf *m;
1849 struct m_tag *mtag;
1850 struct ip_srcrt_opt *opt;
1852 if (m0 == NULL)
1853 return NULL;
1855 mtag = m_tag_find(m0, PACKET_TAG_IPSRCRT, NULL);
1856 if (mtag == NULL)
1857 return NULL;
1858 opt = m_tag_data(mtag);
1860 if (opt->ip_nhops == 0)
1861 return (NULL);
1862 m = m_get(M_NOWAIT, MT_HEADER);
1863 if (m == NULL)
1864 return (NULL);
1866 #define OPTSIZ (sizeof(opt->ip_srcrt.nop) + sizeof(opt->ip_srcrt.srcopt))
1868 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
1869 m->m_len = opt->ip_nhops * sizeof(struct in_addr) +
1870 sizeof(struct in_addr) + OPTSIZ;
1871 #ifdef DIAGNOSTIC
1872 if (ipprintfs) {
1873 kprintf("ip_srcroute: nhops %d mlen %d",
1874 opt->ip_nhops, m->m_len);
1876 #endif
1879 * First save first hop for return route
1881 p = &opt->ip_srcrt.route[opt->ip_nhops - 1];
1882 *(mtod(m, struct in_addr *)) = *p--;
1883 #ifdef DIAGNOSTIC
1884 if (ipprintfs)
1885 kprintf(" hops %x", ntohl(mtod(m, struct in_addr *)->s_addr));
1886 #endif
1889 * Copy option fields and padding (nop) to mbuf.
1891 opt->ip_srcrt.nop = IPOPT_NOP;
1892 opt->ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
1893 memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &opt->ip_srcrt.nop,
1894 OPTSIZ);
1895 q = (struct in_addr *)(mtod(m, caddr_t) +
1896 sizeof(struct in_addr) + OPTSIZ);
1897 #undef OPTSIZ
1899 * Record return path as an IP source route,
1900 * reversing the path (pointers are now aligned).
1902 while (p >= opt->ip_srcrt.route) {
1903 #ifdef DIAGNOSTIC
1904 if (ipprintfs)
1905 kprintf(" %x", ntohl(q->s_addr));
1906 #endif
1907 *q++ = *p--;
1910 * Last hop goes to final destination.
1912 *q = opt->ip_srcrt.dst;
1913 m_tag_delete(m0, mtag);
1914 #ifdef DIAGNOSTIC
1915 if (ipprintfs)
1916 kprintf(" %x\n", ntohl(q->s_addr));
1917 #endif
1918 return (m);
1922 * Strip out IP options.
1924 void
1925 ip_stripoptions(struct mbuf *m)
1927 int datalen;
1928 struct ip *ip = mtod(m, struct ip *);
1929 caddr_t opts;
1930 int optlen;
1932 optlen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
1933 opts = (caddr_t)(ip + 1);
1934 datalen = m->m_len - (sizeof(struct ip) + optlen);
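	/*
	 * Slide the payload down over the options, shrink the mbuf
	 * (and pkthdr) length by optlen and rewrite ip_vhl for the
	 * minimal 5-word (20 byte) header.
	 */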
1935 bcopy(opts + optlen, opts, datalen);
1936 m->m_len -= optlen;
1937 if (m->m_flags & M_PKTHDR)
1938 m->m_pkthdr.len -= optlen;
1939 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
1942 u_char inetctlerrmap[PRC_NCMDS] = {
1943 0, 0, 0, 0,
1944 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
1945 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
1946 EMSGSIZE, EHOSTUNREACH, 0, 0,
1947 0, 0, 0, 0,
1948 ENOPROTOOPT, ECONNREFUSED
1952 * Forward a packet. If some error occurs return the sender
1953 * an icmp packet. Note we can't always generate a meaningful
1954 * icmp message because icmp doesn't have a large enough repertoire
1955 * of codes and types.
1957 * If not forwarding, just drop the packet. This could be confusing
1958 * if ipforwarding was zero but some routing protocol was advancing
1959 * us as a gateway to somewhere. However, we must let the routing
1960 * protocol deal with that.
1962 * The using_srcrt parameter indicates whether the packet is being forwarded
1963 * via a source route.
1965 void
1966 ip_forward(struct mbuf *m, boolean_t using_srcrt, struct sockaddr_in *next_hop)
1968 struct ip *ip = mtod(m, struct ip *);
1969 struct rtentry *rt;
1970 struct route fwd_ro;
1971 int error, type = 0, code = 0, destmtu = 0;
1972 struct mbuf *mcopy, *mtemp = NULL;
1973 n_long dest;
1974 struct in_addr pkt_dst;
1976 dest = INADDR_ANY;
1978 * Cache the destination address of the packet; this may be
1979 * changed by use of 'ipfw fwd'.
1981 pkt_dst = (next_hop != NULL) ? next_hop->sin_addr : ip->ip_dst;
1983 #ifdef DIAGNOSTIC
1984 if (ipprintfs)
1985 kprintf("forward: src %x dst %x ttl %x\n",
1986 ip->ip_src.s_addr, pkt_dst.s_addr, ip->ip_ttl);
1987 #endif
1989 if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
1990 ipstat.ips_cantforward++;
1991 m_freem(m);
1992 return;
1994 if (!ipstealth && ip->ip_ttl <= IPTTLDEC) {
1995 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0);
1996 return;
1999 bzero(&fwd_ro, sizeof(fwd_ro));
2000 ip_rtaddr(pkt_dst, &fwd_ro);
2001 if (fwd_ro.ro_rt == NULL) {
2002 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
2003 return;
2005 rt = fwd_ro.ro_rt;
2007 if (curthread->td_type == TD_TYPE_NETISR) {
2009 * Save the IP header and at most 8 bytes of the payload,
2010 * in case we need to generate an ICMP message to the src.
2012 mtemp = ipforward_mtemp[mycpuid];
2013 KASSERT((mtemp->m_flags & M_EXT) == 0 &&
2014 mtemp->m_data == mtemp->m_pktdat &&
2015 m_tag_first(mtemp) == NULL,
2016 ("ip_forward invalid mtemp1"));
2018 if (!m_dup_pkthdr(mtemp, m, M_NOWAIT)) {
2020 * It's probably ok if the pkthdr dup fails (because
2021 * the deep copy of the tag chain failed), but for now
2022 * be conservative and just discard the copy since
2023 * code below may some day want the tags.
2025 mtemp = NULL;
2026 } else {
2027 mtemp->m_type = m->m_type;
2028 mtemp->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
2029 (int)ip->ip_len);
2030 mtemp->m_pkthdr.len = mtemp->m_len;
2031 m_copydata(m, 0, mtemp->m_len, mtod(mtemp, caddr_t));
2035 if (!ipstealth)
2036 ip->ip_ttl -= IPTTLDEC;
2039 * If forwarding packet using same interface that it came in on,
2040 * perhaps should send a redirect to sender to shortcut a hop.
2041 * Only send redirect if source is sending directly to us,
2042 * and if packet was not source routed (or has any options).
2043 * Also, don't send redirect if forwarding using a default route
2044 * or a route modified by a redirect.
2046 if (rt->rt_ifp == m->m_pkthdr.rcvif &&
2047 !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
2048 satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
2049 ipsendredirects && !using_srcrt && next_hop == NULL) {
2050 u_long src = ntohl(ip->ip_src.s_addr);
2051 struct in_ifaddr *rt_ifa = (struct in_ifaddr *)rt->rt_ifa;
2053 if (rt_ifa != NULL &&
2054 (src & rt_ifa->ia_subnetmask) == rt_ifa->ia_subnet) {
2055 if (rt->rt_flags & RTF_GATEWAY)
2056 dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
2057 else
2058 dest = pkt_dst.s_addr;
2059 /*
2060 * Router requirements says to only send
2061 * host redirects.
2062 */
2063 type = ICMP_REDIRECT;
2064 code = ICMP_REDIRECT_HOST;
2065 #ifdef DIAGNOSTIC
2066 if (ipprintfs)
2067 kprintf("redirect (%d) to %x\n", code, dest);
2068 #endif
2069 }
2070 }
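/*
 * If no redirect is warranted, type/code remain 0 and the packet is just
 * forwarded; otherwise a host redirect is reported to the source after a
 * successful ip_output() below.
 */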
2072 error = ip_output(m, NULL, &fwd_ro, IP_FORWARDING, NULL, NULL);
2073 if (error == 0) {
2074 ipstat.ips_forward++;
2075 if (type == 0) {
2076 if (mtemp)
2077 ipflow_create(&fwd_ro, mtemp);
2078 goto done;
2079 }
2080 ipstat.ips_redirectsent++;
2081 } else {
2082 ipstat.ips_cantforward++;
2083 }
2085 if (mtemp == NULL)
2086 goto done;
2088 /*
2089 * Errors that do not require generating ICMP message
2090 */
2091 switch (error) {
2092 case ENOBUFS:
2093 /*
2094 * A router should not generate ICMP_SOURCEQUENCH as
2095 * required in RFC1812 Requirements for IP Version 4 Routers.
2096 * Source quench could be a big problem under DoS attacks,
2097 * or if the underlying interface is rate-limited.
2098 * Those who need source quench packets may re-enable them
2099 * via the net.inet.ip.sendsourcequench sysctl.
2100 */
2101 if (!ip_sendsourcequench)
2102 goto done;
2103 break;
2105 case EACCES: /* ipfw denied packet */
2106 goto done;
2107 }
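/*
 * The switch above only filters errors that must not produce an ICMP
 * reply; the switch below chooses the ICMP type/code for those that do.
 */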
2109 KASSERT((mtemp->m_flags & M_EXT) == 0 &&
2110 mtemp->m_data == mtemp->m_pktdat,
2111 ("ip_forward invalid mtemp2"));
2112 mcopy = m_copym(mtemp, 0, mtemp->m_len, M_NOWAIT);
2113 if (mcopy == NULL)
2114 goto done;
2116 /*
2117 * Send ICMP message.
2118 */
2119 switch (error) {
2120 case 0: /* forwarded, but need redirect */
2121 /* type, code set above */
2122 break;
2124 case ENETUNREACH: /* shouldn't happen, checked above */
2125 case EHOSTUNREACH:
2126 case ENETDOWN:
2127 case EHOSTDOWN:
2128 default:
2129 type = ICMP_UNREACH;
2130 code = ICMP_UNREACH_HOST;
2131 break;
2133 case EMSGSIZE:
2134 type = ICMP_UNREACH;
2135 code = ICMP_UNREACH_NEEDFRAG;
2136 #ifdef IPSEC
2137 /*
2138 * If the packet is routed over IPsec tunnel, tell the
2139 * originator the tunnel MTU.
2140 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
2141 * XXX quickhack!!!
2142 */
2143 if (fwd_ro.ro_rt != NULL) {
2144 struct secpolicy *sp = NULL;
2145 int ipsecerror;
2146 int ipsechdr;
2147 struct route *ro;
2149 sp = ipsec4_getpolicybyaddr(mcopy,
2150 IPSEC_DIR_OUTBOUND,
2151 IP_FORWARDING,
2152 &ipsecerror);
2154 if (sp == NULL)
2155 destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
2156 else {
2157 /* count IPsec header size */
2158 ipsechdr = ipsec4_hdrsiz(mcopy,
2159 IPSEC_DIR_OUTBOUND,
2160 NULL);
2162 /*
2163 * find the correct route for outer IPv4
2164 * header, compute tunnel MTU.
2165 */
2167 if (sp->req != NULL && sp->req->sav != NULL &&
2168 sp->req->sav->sah != NULL) {
2169 ro = &sp->req->sav->sah->sa_route;
2170 if (ro->ro_rt != NULL &&
2171 ro->ro_rt->rt_ifp != NULL) {
2172 destmtu =
2173 ro->ro_rt->rt_ifp->if_mtu;
2174 destmtu -= ipsechdr;
2175 }
2176 }
2178 key_freesp(sp);
2179 }
2180 }
2181 #elif defined(FAST_IPSEC)
2182 /*
2183 * If the packet is routed over IPsec tunnel, tell the
2184 * originator the tunnel MTU.
2185 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
2186 * XXX quickhack!!!
2187 */
2188 if (fwd_ro.ro_rt != NULL) {
2189 struct secpolicy *sp = NULL;
2190 int ipsecerror;
2191 int ipsechdr;
2192 struct route *ro;
2194 sp = ipsec_getpolicybyaddr(mcopy,
2195 IPSEC_DIR_OUTBOUND,
2196 IP_FORWARDING,
2197 &ipsecerror);
2199 if (sp == NULL)
2200 destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
2201 else {
2202 /* count IPsec header size */
2203 ipsechdr = ipsec4_hdrsiz(mcopy,
2204 IPSEC_DIR_OUTBOUND,
2205 NULL);
2207 /*
2208 * find the correct route for outer IPv4
2209 * header, compute tunnel MTU.
2210 */
2212 if (sp->req != NULL &&
2213 sp->req->sav != NULL &&
2214 sp->req->sav->sah != NULL) {
2215 ro = &sp->req->sav->sah->sa_route;
2216 if (ro->ro_rt != NULL &&
2217 ro->ro_rt->rt_ifp != NULL) {
2218 destmtu =
2219 ro->ro_rt->rt_ifp->if_mtu;
2220 destmtu -= ipsechdr;
2221 }
2222 }
2224 KEY_FREESP(&sp);
2225 }
2226 }
2227 #else /* !IPSEC && !FAST_IPSEC */
2228 if (fwd_ro.ro_rt != NULL)
2229 destmtu = fwd_ro.ro_rt->rt_ifp->if_mtu;
2230 #endif /*IPSEC*/
2231 ipstat.ips_cantfrag++;
2232 break;
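/*
 * For EMSGSIZE the needfrag ICMP carries destmtu (the outgoing interface
 * MTU, reduced by the IPsec header size when a tunnel policy applies) so
 * the sender's path MTU discovery can adapt.
 */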
2234 case ENOBUFS:
2235 type = ICMP_SOURCEQUENCH;
2236 code = 0;
2237 break;
2239 case EACCES: /* ipfw denied packet */
2240 panic("ip_forward EACCES should not reach");
2241 }
2242 icmp_error(mcopy, type, code, dest, destmtu);
2243 done:
2244 if (mtemp != NULL)
2245 m_tag_delete_chain(mtemp);
2246 if (fwd_ro.ro_rt != NULL)
2247 RTFREE(fwd_ro.ro_rt);
2248 }
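/*
 * The done: label strips any tags duplicated onto the per-cpu mtemp so it
 * is clean for reuse and drops the reference on the cached forwarding
 * route, whether or not an ICMP error was generated.
 */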
2250 void
2251 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
2252 struct mbuf *m)
2253 {
2254 if (inp->inp_socket->so_options & SO_TIMESTAMP) {
2255 struct timeval tv;
2257 microtime(&tv);
2258 *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
2259 SCM_TIMESTAMP, SOL_SOCKET);
2260 if (*mp)
2261 mp = &(*mp)->m_next;
2262 }
2263 if (inp->inp_flags & INP_RECVDSTADDR) {
2264 *mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
2265 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
2266 if (*mp)
2267 mp = &(*mp)->m_next;
2268 }
2269 if (inp->inp_flags & INP_RECVTTL) {
2270 *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
2271 sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
2272 if (*mp)
2273 mp = &(*mp)->m_next;
2274 }
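/*
 * Each enabled option above appends one control mbuf via sbcreatecontrol()
 * and, on success, advances mp to m_next so later options extend the same
 * chain in order.
 */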
2275 #ifdef notyet
2276 /* XXX
2277 * Moving these out of udp_input() made them even more broken
2278 * than they already were.
2279 */
2280 /* options were tossed already */
2281 if (inp->inp_flags & INP_RECVOPTS) {
2282 *mp = sbcreatecontrol((caddr_t) opts_deleted_above,
2283 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
2284 if (*mp)
2285 mp = &(*mp)->m_next;
2286 }
2287 /* ip_srcroute doesn't do what we want here, need to fix */
2288 if (inp->inp_flags & INP_RECVRETOPTS) {
2289 *mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
2290 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
2291 if (*mp)
2292 mp = &(*mp)->m_next;
2293 }
2294 #endif
2295 if (inp->inp_flags & INP_RECVIF) {
2296 struct ifnet *ifp;
2297 struct sdlbuf {
2298 struct sockaddr_dl sdl;
2299 u_char pad[32];
2300 } sdlbuf;
2301 struct sockaddr_dl *sdp;
2302 struct sockaddr_dl *sdl2 = &sdlbuf.sdl;
2304 if (((ifp = m->m_pkthdr.rcvif)) &&
2305 ((ifp->if_index != 0) && (ifp->if_index <= if_index))) {
2306 sdp = IF_LLSOCKADDR(ifp);
2307 /*
2308 * Change our mind and don't try copy.
2309 */
2310 if ((sdp->sdl_family != AF_LINK) ||
2311 (sdp->sdl_len > sizeof(sdlbuf))) {
2312 goto makedummy;
2313 }
2314 bcopy(sdp, sdl2, sdp->sdl_len);
2315 } else {
2316 makedummy:
2317 sdl2->sdl_len =
2318 offsetof(struct sockaddr_dl, sdl_data[0]);
2319 sdl2->sdl_family = AF_LINK;
2320 sdl2->sdl_index = 0;
2321 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
2322 }
2323 *mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
2324 IP_RECVIF, IPPROTO_IP);
2325 if (*mp)
2326 mp = &(*mp)->m_next;
2327 }
2328 }
2330 /*
2331 * XXX these routines are called from the upper part of the kernel.
2332 *
2333 * They could also be moved to ip_mroute.c, since all the RSVP
2334 * handling is done there already.
2335 */
2336 int
2337 ip_rsvp_init(struct socket *so)
2338 {
2339 if (so->so_type != SOCK_RAW ||
2340 so->so_proto->pr_protocol != IPPROTO_RSVP)
2341 return EOPNOTSUPP;
2343 if (ip_rsvpd != NULL)
2344 return EADDRINUSE;
2346 ip_rsvpd = so;
2347 /*
2348 * This may seem silly, but we need to be sure we don't over-increment
2349 * the RSVP counter, in case something slips up.
2350 */
2351 if (!ip_rsvp_on) {
2352 ip_rsvp_on = 1;
2353 rsvp_on++;
2354 }
2356 return 0;
2357 }
2359 int
2360 ip_rsvp_done(void)
2361 {
2362 ip_rsvpd = NULL;
2363 /*
2364 * This may seem silly, but we need to be sure we don't over-decrement
2365 * the RSVP counter, in case something slips up.
2366 */
2367 if (ip_rsvp_on) {
2368 ip_rsvp_on = 0;
2369 rsvp_on--;
2370 }
2371 return 0;
2372 }
2374 int
2375 rsvp_input(struct mbuf **mp, int *offp, int proto)
2376 {
2377 struct mbuf *m = *mp;
2379 *mp = NULL;
2381 if (rsvp_input_p) { /* call the real one if loaded */
2382 *mp = m;
2383 rsvp_input_p(mp, offp, proto);
2384 return(IPPROTO_DONE);
2385 }
2387 /* Can still get packets with rsvp_on = 0 if there is a local member
2388 * of the group to which the RSVP packet is addressed. But in this
2389 * case we want to throw the packet away.
2390 */
2392 if (!rsvp_on) {
2393 m_freem(m);
2394 return(IPPROTO_DONE);
2395 }
2397 if (ip_rsvpd != NULL) {
2398 *mp = m;
2399 rip_input(mp, offp, proto);
2400 return(IPPROTO_DONE);
2401 }
2402 /* Drop the packet */
2403 m_freem(m);
2404 return(IPPROTO_DONE);
2405 }
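/*
 * ip_rsvp_init()/ip_rsvp_done() above register and unregister the RSVP
 * daemon's raw socket and keep the rsvp_on counter consistent; rsvp_input()
 * then hands packets to a loaded kernel handler (rsvp_input_p) if present,
 * else to the registered socket via rip_input(), else drops them.
 */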