/*	$FreeBSD: src/sys/contrib/pf/net/pf_norm.c,v 1.10 2004/08/14 15:32:40 dwmalone Exp $	*/
/*	$OpenBSD: pf_norm.c,v 1.80.2.1 2004/04/30 21:46:33 brad Exp $	*/
/*	add	$OpenBSD: pf_norm.c,v 1.87 2004/05/11 07:34:11 dhartmei Exp $ */
/*	$DragonFly: src/sys/net/pf/pf_norm.c,v 1.10 2008/09/04 09:08:22 hasso Exp $ */

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <vm/vm_zone.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/pf/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <net/pf/pfvar.h>
/*
 * XXX: This should go to netinet/ip6.h (KAME)
 */
#define PFFRAG_SEENLAST		0x0001	/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER		0x0002	/* Non-buffering fragment cache */
#define PFFRAG_DROP		0x0004	/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
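
/*
 * pf keeps fragments in one of two ways, selected per scrub rule: a
 * buffering mode that queues every fragment until the datagram can be
 * fully reassembled, and a non-buffering cache (PFFRAG_NOBUFFER) that
 * lets fragments through while only remembering which byte ranges have
 * been seen, so later overlaps can be trimmed or dropped.
 */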
TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment)	pf_cachequeue;

static int	 pf_frag_compare(struct pf_fragment *,
		    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment)	pf_frag_tree, pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
/* Private prototypes */
void		 pf_ip2key(struct pf_fragment *, struct ip *);
void		 pf_remove_fragment(struct pf_fragment *);
void		 pf_flush_fragments(void);
void		 pf_free_fragment(struct pf_fragment *);
struct pf_fragment *pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf	*pf_reassemble(struct mbuf **, struct pf_fragment **,
		    struct pf_frent *, int);
struct mbuf	*pf_fragcache(struct mbuf **, struct ip *,
		    struct pf_fragment **, int, int, int *);
u_int16_t	 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int		 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int);

#define	DPFPRINTF(x) if (pf_status.debug >= PF_DEBUG_MISC) \
			{ kprintf("%s: ", __func__); kprintf x ;}
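
/*
 * DPFPRINTF takes a doubly-parenthesized argument list so that a whole
 * kprintf argument tuple can pass through the single macro parameter,
 * e.g. DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
 */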
/* Globals */
vm_zone_t	 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
vm_zone_t	 pf_state_scrub_pl;
int		 pf_nfrents, pf_ncache;
void
pf_normalize_init(void)
{
	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);

	TAILQ_INIT(&pf_fragqueue);
	TAILQ_INIT(&pf_cachequeue);
}
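
/*
 * RB-tree comparator: orders fragment descriptors by (id, protocol,
 * source, destination), the same tuple pf_ip2key() extracts from an IP
 * header, so every in-flight datagram maps to exactly one tree node.
 */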
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}
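
/*
 * Both queues are kept in LRU order (pf_find_fragment() rotates any
 * hit to the head), so expiration only has to peel entries off the
 * tail until it meets one younger than the PFTM_FRAG timeout.
 */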
void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_second -
				    pf_default_rule.timeout[PFTM_FRAG];

	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __func__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __func__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __func__));
	}
}
/*
 * Try to flush old fragments to make space for new ones
 */
void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n",
	    pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n",
	    pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}
/* Frees the fragments and all associated entries */

void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frcache	*frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			pool_put(&pf_frent_pl, frent);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		    frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

			KASSERT((LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end),
			    ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
			    " frcache->fr_end): %s", __func__));

			pool_put(&pf_cent_pl, frcache);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}
void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}
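
/*
 * Look up the tree node matching this header's (id, proto, src, dst)
 * key. A hit refreshes fr_timeout and moves the entry to the head of
 * its queue, keeping the tail the coldest candidate for purging.
 */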
struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_second;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}
/* Removes a fragment from the fragment queue and frees the fragment */

void
pf_remove_fragment(struct pf_fragment *frag)
{
	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
		pool_put(&pf_frag_pl, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
		pool_put(&pf_cache_pl, frag);
	}
}
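
/*
 * ip_off stores the fragment offset in 8-byte units; FR_IP_OFF()
 * masks off the flag bits and converts it to a byte offset (e.g. an
 * ip_off of 2 means the fragment's payload starts at byte 16).
 */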
#define FR_IP_OFF(fr)	(((fr)->fr_ip->ip_off & IP_OFFMASK) << 3)
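
/*
 * Full reassembly: take ownership of the fragment's mbuf, splice it
 * into the sorted per-datagram queue (trimming any overlap against its
 * neighbours), and return NULL until every byte up to the last
 * fragment has arrived, at which point the rebuilt packet is handed
 * back and the descriptor is freed.
 */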
struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ip->ip_off & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ip->ip_len - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __func__));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_frag_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_second;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __func__));

	if (frep != NULL &&
	    FR_IP_OFF(frep) + frep->fr_ip->ip_len - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + frep->fr_ip->ip_len -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = ip->ip_off + (precut >> 3);
		off = (ip->ip_off & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = ip_len;
	}

	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < frea->fr_ip->ip_len - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    frea->fr_ip->ip_len - aftercut;
			frea->fr_ip->ip_off = frea->fr_ip->ip_off +
			    (aftercut >> 3);
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		pool_put(&pf_frent_pl, frea);
		pf_nfrents--;
	}

 insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += frep->fr_ip->ip_len - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT((frent != NULL), ("frent == NULL: %s", __func__));
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		pool_put(&pf_frent_pl, frent);
		pf_nfrents--;
		m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data;
		m_cat(m, m2);
	}

	/*
	 * Note: this 1's complement optimization only works while there
	 * are <= 65535 fragments, so the 32 bit accumulator cannot wrap.
	 *
	 * Handle 1's complement carry for the 16 bit result.  This can
	 * result in another carry which must also be handled.
	 */
	m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
	    (m->m_pkthdr.csum_data >> 16);
	if (m->m_pkthdr.csum_data > 0xFFFF)
		m->m_pkthdr.csum_data -= 0xFFFF;

	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = off + hlen;
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ip->ip_len));
	return (m);

 drop_fragment:
	/* Oops - fail safe - drop packet */
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}
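
/*
 * Non-buffering mode: the fragment itself is forwarded immediately;
 * only a list of (fr_off, fr_end) byte ranges is retained per
 * datagram. Overlaps with ranges already passed are either trimmed
 * from the mbuf or cause the whole datagram to be dropped, as
 * selected by the 'drop' argument (pf.conf's "fragment crop" versus
 * "fragment drop-ovl" scrub options in this era of pf).
 */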
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frcache	*frp, *fra, *cur = NULL;
	int			 ip_len = h->ip_len - (h->ip_hl << 2);
	u_int16_t		 off = h->ip_off << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __func__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = pool_get(&pf_cache_pl, PR_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
		if (cur == NULL) {
			pool_put(&pf_cache_pl, *frag);
			*frag = NULL;
			goto no_mem;
		}
		pf_ncache++;

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_second;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_cache);
		LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __func__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, MB_DONTWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __func__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    h->ip_len - precut),
				    ("m->m_len != h->ip_len - precut: %s",
				    __func__));
				h->ip_off = h->ip_off +
				    (precut >> 3);
				h->ip_len = h->ip_len - precut;
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == h->ip_len - aftercut),
				    ("m->m_len != h->ip_len - aftercut: %s",
				    __func__));
				h->ip_len = h->ip_len - aftercut;
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				pool_put(&pf_cent_pl, cur);
				pf_ncache--;
				cur = NULL;

			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __func__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				pool_put(&pf_cent_pl, frp);
				pf_ncache--;
				frp = NULL;

			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
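
/*
 * IPv4 normalization entry point: match the packet against the scrub
 * ruleset, sanity-check the header, and for fragments hand off to
 * either pf_reassemble() or pf_fragcache() depending on whether the
 * matching rule asked for fragment crop/drop behaviour.
 */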
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (h->ip_off & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (h->ip_off & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets++;
		r->bytes += h->ip_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > h->ip_len)
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off &= ~IP_DF;

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & IP_DF) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = h->ip_len - hlen;
	ip_off = (h->ip_off & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT &&
		    m->m_pkthdr.fw_flags & PF_MBUF_FRAGCACHE) {
			/* Already passed the fragment cache in the
			 * input direction.  If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		frag = pf_find_fragment(h, &pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN)
			m->m_pkthdr.fw_flags |= PF_MBUF_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		goto fragment_pass;
	}

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	h->ip_off &= IP_DF;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	if (r->rule_flag & PFRULE_RANDOMID) {
#ifdef RANDOM_IP_ID
		h->ip_id = ip_randomid();
#else
		h->ip_id = htons(ip_id++);
#endif
	}

	return (PF_PASS);

 fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);

 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL);

	return (PF_DROP);
}
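
/*
 * IPv6 normalization: walk the extension header chain, validating
 * option lengths and the jumbo payload option as we go. Fragments are
 * only bounds-checked here; there is no IPv6 reassembly yet (see the
 * "do something about it" stub below).
 */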
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets++;
		r->bytes += ntohs(h->ip6_plen);
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
		h->ip6_hlim = r->min_ttl;

	return (PF_PASS);

 fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);
}
#endif /* INET6 */
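
/*
 * Stateless TCP sanitizing: drop impossible flag combinations, clear
 * the reserved header bits and any stray urgent pointer, and clamp
 * the MSS option, fixing up th_sum for every field that is rewritten.
 */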
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->kif != NULL &&
		    (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets++;
		r->bytes += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL);
	return (PF_DROP);
}
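
/*
 * Called once per connection when state is created: allocate the
 * scrub descriptor, record the initial TTL, and, if the SYN carries a
 * timestamp option, arm timestamp modulation with a random offset.
 */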
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int8_t	 hdr[60];
	u_int8_t	*opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
	if (src->scrub == NULL)
		return (1);
	bzero(src->scrub, sizeof(*src->scrub));

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int	hlen;

		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod = karc4random();
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		pool_put(&pf_state_scrub_pl, state->src.scrub);
	if (state->dst.scrub)
		pool_put(&pf_state_scrub_pl, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}
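
/*
 * Per-packet companion to pf_normalize_tcp_init(): raise the TTL back
 * to the highest value seen on the connection and add (or strip, on
 * the reply path) the per-state random timestamp offset, so a peer
 * cannot use raw timestamps for uptime or NAT detection.
 */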
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state_peer *src,
    struct pf_state_peer *dst, int *writeback)
{
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("pf_normalize_tcp_stateful: !(src->scrub || dst->scrub)"));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int	hlen;

		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					u_int32_t ts_value;
					if (src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						memcpy(&ts_value, &opt[2],
						    sizeof(u_int32_t));
						ts_value = htonl(ntohl(ts_value)
						    + src->scrub->pfss_ts_mod);
						pf_change_a(&opt[2],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&ts_value, &opt[6],
					    sizeof(u_int32_t));
					if (ts_value && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						ts_value = htonl(ntohl(ts_value)
						    - dst->scrub->pfss_ts_mod);
						pf_change_a(&opt[6],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}
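
/*
 * Walk the TCP option list and clamp any MSS option larger than the
 * rule's max-mss, fixing up th_sum; returns nonzero if the header was
 * rewritten so the caller knows to copy it back into the mbuf.
 */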
int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		*optp;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);
	optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss));
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	return (rewrite);
}