/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15
static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
static u32 csum_idx_gen;
static DEFINE_RWLOCK(csum_lock);

static struct tcf_hashinfo csum_hash_info = {
	.htab	= tcf_csum_ht,
	.hmask	= CSUM_TAB_MASK,
	.lock	= &csum_lock,
};

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

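/*
 * Create a csum action instance from the netlink attributes, or update
 * the verdict and checksum update flags of an existing instance.
 */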
static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
			 struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_common *pc;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &csum_idx_gen, &csum_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		p = to_tcf_csum(pc);
		ret = ACT_P_CREATED;
	} else {
		p = to_tcf_csum(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &csum_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &csum_hash_info);

	return ret;
}

static int tcf_csum_cleanup(struct tc_action *a, int bind)
{
	struct tcf_csum *p = a->priv;

	return tcf_hash_release(&p->common, bind, &csum_hash_info);
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    (skb_cloned(skb) &&
	     !skb_clone_writable(skb, hl + ntkoff) &&
	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

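/* Recompute the ICMP checksum of an IPv4 packet. */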
static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

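/* Recompute the IGMP checksum of an IPv4 packet. */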
static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

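/* Recompute the ICMPv6 checksum, including the IPv6 pseudo-header. */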
static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

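/* Recompute the TCP checksum of an IPv4 packet, including the pseudo-header. */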
static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

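/* Recompute the TCP checksum of an IPv6 packet, including the pseudo-header. */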
static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

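/*
 * Recompute the UDP or UDPLITE checksum of an IPv4 packet. A zero UDP
 * checksum (optional over IPv4) is left untouched.
 */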
static int tcf_csum_ipv4_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another purpose.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

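/*
 * Recompute the UDP or UDPLITE checksum of an IPv6 packet. Unlike IPv4,
 * the checksum is always filled in, since it is mandatory over IPv6.
 */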
static int tcf_csum_ipv6_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without any protocol check;
	 * UDPLITE uses udph->len for another purpose.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

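/*
 * Dispatch an IPv4 packet to the per-protocol helpers selected by
 * update_flags, then optionally rewrite the IPv4 header checksum.
 */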
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

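/*
 * Scan a hop-by-hop options header for a jumbogram option and, if found,
 * return the real payload length through *pl.
 */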
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
				 unsigned int ixhl, unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

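/*
 * Walk the IPv6 extension header chain and dispatch to the per-protocol
 * helpers selected by update_flags; fragments and unknown headers are
 * left untouched.
 */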
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

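/*
 * The action entry point: snapshot the configuration under the lock,
 * update the checksums of the IPv4 or IPv6 packet, and return the
 * configured verdict (or TC_ACT_SHOT on failure).
 */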
static int tcf_csum(struct sk_buff *skb,
		    const struct tc_action *a, struct tcf_result *res)
{
	struct tcf_csum *p = a->priv;
	int action;
	u32 update_flags;

	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

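/* Dump the action configuration and timestamps back to userspace. */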
static int tcf_csum_dump(struct sk_buff *skb,
			 struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = a->priv;
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index   = p->tcf_index,
		.action  = p->tcf_action,
		.refcnt  = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.hinfo		= &csum_hash_info,
	.type		= TCA_ACT_CSUM,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.cleanup	= tcf_csum_cleanup,
	.lookup		= tcf_hash_search,
	.init		= tcf_csum_init,
	.walk		= tcf_generic_walker
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);