/*
 *	net/ipv4/tcp_offload.c
 *
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;
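
	/* oldlen is the ones' complement of the original length (TCP header
	 * plus payload), truncated to 16 bits. It seeds the incremental
	 * checksum updates below: adding oldlen together with a new length
	 * to th->check retargets the pseudo-header checksum from the big
	 * GSO packet to each segment (RFC 1624 style arithmetic).
	 */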
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;
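
	/* If the device features can handle this GSO packet, do not segment
	 * in software: just sanity-check the gso_type, recompute gso_segs
	 * for untrusted (DODGY) sources, and return NULL so the skb is
	 * passed on unsegmented.
	 */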
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);
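
	/* Every full-sized segment carries thlen + mss bytes, so the
	 * incremental checksum fixup is identical for all of them and can
	 * be computed once here; the (usually shorter) last segment is
	 * patched separately after the loop.
	 */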
	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			/* {tcp|sock}_wfree() use exact truesize accounting :
			 * sum(skb->truesize) MUST be exactly gso_skb->truesize.
			 * So we account mss bytes of 'true size' for each segment.
			 * The last segment will contain the remaining.
			 */
			skb->truesize = mss;
			gso_skb->truesize -= mss;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		swap(gso_skb->truesize, skb->truesize);
	}
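
	/* The last segment is usually shorter than mss, so its checksum
	 * fixup is recomputed from its actual length (linear tail plus
	 * paged data) instead of reusing newcheck.
	 */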
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);
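
	/* Walk the per-NAPI list of held packets looking for one from the
	 * same flow. th->source and th->dest are adjacent 16-bit fields,
	 * so a single 32-bit load compares both ports at once.
	 */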
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;
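
	/* A packet from the same flow is already held: decide whether the
	 * new segment can be merged into it. Any of the following forces a
	 * flush instead: an earlier flush request, a CWR flag, a change in
	 * any flag other than FIN/PSH, a different ack sequence, differing
	 * TCP options, a segment larger than the held flow's mss, or a
	 * sequence number that does not continue where the held packet
	 * ends. On a successful merge, FIN/PSH from the new segment are
	 * propagated to the merged packet.
	 */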
found:
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);
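
/* Finish a GRO merge: run once when the aggregated packet is handed up
 * the stack. The merged skb is marked CHECKSUM_PARTIAL and its gso_segs
 * set to the number of merged segments, so it can be re-segmented later
 * (e.g. when forwarded out another interface).
 */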
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
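
/* Prepare an outgoing GSO packet's TCP checksum: zero it and store the
 * IPv4 pseudo-header checksum via __tcp_v4_send_check(), leaving the rest
 * for the device (CHECKSUM_PARTIAL) or for tcp_tso_segment() above.
 */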
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
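
/* IPv4-specific GRO entry point: verify the TCP checksum (using the
 * hardware-provided CHECKSUM_COMPLETE value when available, computing it
 * otherwise) before letting the generic tcp_gro_receive() try to merge.
 */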
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}
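
/* IPv4-specific GRO completion: restore th->check to the pseudo-header
 * checksum for the merged length and mark the packet SKB_GSO_TCPV4 before
 * the generic tcp_gro_complete() fills in the remaining GSO metadata.
 */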
static int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
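
/* Hook the callbacks above into the inet offload machinery: the core
 * GSO/GRO code dispatches on the IP protocol number, so registering this
 * structure for IPPROTO_TCP routes all TCPv4 offload work through this
 * file. tcpv4_offload_init() is expected to run once while the IPv4
 * stack initializes.
 */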
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	= tcp_v4_gso_send_check,
		.gso_segment	= tcp_tso_segment,
		.gro_receive	= tcp4_gro_receive,
		.gro_complete	= tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}