/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
11 #include <linux/kernel.h>
12 #include <linux/socket.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/printk.h>
17 #include <net/protocol.h>
20 #include "ip6_offload.h"
22 static int ipv6_gso_pull_exthdrs(struct sk_buff
*skb
, int proto
)
24 const struct net_offload
*ops
= NULL
;
27 struct ipv6_opt_hdr
*opth
;
30 if (proto
!= NEXTHDR_HOP
) {
31 ops
= rcu_dereference(inet6_offloads
[proto
]);
36 if (!(ops
->flags
& INET6_PROTO_GSO_EXTHDR
))
40 if (unlikely(!pskb_may_pull(skb
, 8)))
43 opth
= (void *)skb
->data
;
44 len
= ipv6_optlen(opth
);
46 if (unlikely(!pskb_may_pull(skb
, len
)))
49 proto
= opth
->nexthdr
;
56 static int ipv6_gso_send_check(struct sk_buff
*skb
)
58 const struct ipv6hdr
*ipv6h
;
59 const struct net_offload
*ops
;
62 if (unlikely(!pskb_may_pull(skb
, sizeof(*ipv6h
))))
65 ipv6h
= ipv6_hdr(skb
);
66 __skb_pull(skb
, sizeof(*ipv6h
));
67 err
= -EPROTONOSUPPORT
;
70 ops
= rcu_dereference(inet6_offloads
[
71 ipv6_gso_pull_exthdrs(skb
, ipv6h
->nexthdr
)]);
73 if (likely(ops
&& ops
->callbacks
.gso_send_check
)) {
74 skb_reset_transport_header(skb
);
75 err
= ops
->callbacks
.gso_send_check(skb
);
83 static struct sk_buff
*ipv6_gso_segment(struct sk_buff
*skb
,
84 netdev_features_t features
)
86 struct sk_buff
*segs
= ERR_PTR(-EINVAL
);
87 struct ipv6hdr
*ipv6h
;
88 const struct net_offload
*ops
;
90 struct frag_hdr
*fptr
;
91 unsigned int unfrag_ip6hlen
;
95 if (!(features
& NETIF_F_V6_CSUM
))
96 features
&= ~NETIF_F_SG
;
98 if (unlikely(skb_shinfo(skb
)->gso_type
&
106 if (unlikely(!pskb_may_pull(skb
, sizeof(*ipv6h
))))
109 ipv6h
= ipv6_hdr(skb
);
110 __skb_pull(skb
, sizeof(*ipv6h
));
111 segs
= ERR_PTR(-EPROTONOSUPPORT
);
113 proto
= ipv6_gso_pull_exthdrs(skb
, ipv6h
->nexthdr
);
115 ops
= rcu_dereference(inet6_offloads
[proto
]);
116 if (likely(ops
&& ops
->callbacks
.gso_segment
)) {
117 skb_reset_transport_header(skb
);
118 segs
= ops
->callbacks
.gso_segment(skb
, features
);
125 for (skb
= segs
; skb
; skb
= skb
->next
) {
126 ipv6h
= ipv6_hdr(skb
);
127 ipv6h
->payload_len
= htons(skb
->len
- skb
->mac_len
-
129 if (proto
== IPPROTO_UDP
) {
130 unfrag_ip6hlen
= ip6_find_1stfragopt(skb
, &prevhdr
);
131 fptr
= (struct frag_hdr
*)(skb_network_header(skb
) +
133 fptr
->frag_off
= htons(offset
);
134 if (skb
->next
!= NULL
)
135 fptr
->frag_off
|= htons(IP6_MF
);
136 offset
+= (ntohs(ipv6h
->payload_len
) -
137 sizeof(struct frag_hdr
));
145 static struct sk_buff
**ipv6_gro_receive(struct sk_buff
**head
,
148 const struct net_offload
*ops
;
149 struct sk_buff
**pp
= NULL
;
159 off
= skb_gro_offset(skb
);
160 hlen
= off
+ sizeof(*iph
);
161 iph
= skb_gro_header_fast(skb
, off
);
162 if (skb_gro_header_hard(skb
, hlen
)) {
163 iph
= skb_gro_header_slow(skb
, hlen
, off
);
168 skb_gro_pull(skb
, sizeof(*iph
));
169 skb_set_transport_header(skb
, skb_gro_offset(skb
));
171 flush
+= ntohs(iph
->payload_len
) != skb_gro_len(skb
);
174 proto
= iph
->nexthdr
;
175 ops
= rcu_dereference(inet6_offloads
[proto
]);
176 if (!ops
|| !ops
->callbacks
.gro_receive
) {
177 __pskb_pull(skb
, skb_gro_offset(skb
));
178 proto
= ipv6_gso_pull_exthdrs(skb
, proto
);
179 skb_gro_pull(skb
, -skb_transport_offset(skb
));
180 skb_reset_transport_header(skb
);
181 __skb_push(skb
, skb_gro_offset(skb
));
183 ops
= rcu_dereference(inet6_offloads
[proto
]);
184 if (!ops
|| !ops
->callbacks
.gro_receive
)
190 NAPI_GRO_CB(skb
)->proto
= proto
;
193 nlen
= skb_network_header_len(skb
);
195 for (p
= *head
; p
; p
= p
->next
) {
196 const struct ipv6hdr
*iph2
;
197 __be32 first_word
; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
199 if (!NAPI_GRO_CB(p
)->same_flow
)
203 first_word
= *(__be32
*)iph
^ *(__be32
*)iph2
;
205 /* All fields must match except length and Traffic Class. */
206 if (nlen
!= skb_network_header_len(p
) ||
207 (first_word
& htonl(0xF00FFFFF)) ||
208 memcmp(&iph
->nexthdr
, &iph2
->nexthdr
,
209 nlen
- offsetof(struct ipv6hdr
, nexthdr
))) {
210 NAPI_GRO_CB(p
)->same_flow
= 0;
213 /* flush if Traffic Class fields are different */
214 NAPI_GRO_CB(p
)->flush
|= !!(first_word
& htonl(0x0FF00000));
215 NAPI_GRO_CB(p
)->flush
|= flush
;
218 NAPI_GRO_CB(skb
)->flush
|= flush
;
221 skb_postpull_rcsum(skb
, iph
, skb_network_header_len(skb
));
223 pp
= ops
->callbacks
.gro_receive(head
, skb
);
231 NAPI_GRO_CB(skb
)->flush
|= flush
;
236 static int ipv6_gro_complete(struct sk_buff
*skb
)
238 const struct net_offload
*ops
;
239 struct ipv6hdr
*iph
= ipv6_hdr(skb
);
242 iph
->payload_len
= htons(skb
->len
- skb_network_offset(skb
) -
246 ops
= rcu_dereference(inet6_offloads
[NAPI_GRO_CB(skb
)->proto
]);
247 if (WARN_ON(!ops
|| !ops
->callbacks
.gro_complete
))
250 err
= ops
->callbacks
.gro_complete(skb
);
258 static struct packet_offload ipv6_packet_offload __read_mostly
= {
259 .type
= cpu_to_be16(ETH_P_IPV6
),
261 .gso_send_check
= ipv6_gso_send_check
,
262 .gso_segment
= ipv6_gso_segment
,
263 .gro_receive
= ipv6_gro_receive
,
264 .gro_complete
= ipv6_gro_complete
,
268 static int __init
ipv6_offload_init(void)
271 if (tcpv6_offload_init() < 0)
272 pr_crit("%s: Cannot add TCP protocol offload\n", __func__
);
273 if (udp_offload_init() < 0)
274 pr_crit("%s: Cannot add UDP protocol offload\n", __func__
);
275 if (ipv6_exthdrs_offload_init() < 0)
276 pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__
);
278 dev_add_offload(&ipv6_packet_offload
);
282 fs_initcall(ipv6_offload_init
);