/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/tcp.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#if 0
#define DEBUGP printk
#define DUMP_OFFSET(x)	printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
#else
#define DEBUGP(format, args...)
#define DUMP_OFFSET(x)
#endif
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	int dir;
	struct nf_nat_seq *this_way, *other_way;
	struct nf_conn_nat *nat = nfct_nat(ct);

	DEBUGP("nf_nat_resize_packet: old_size = %u, new_size = %u\n",
	       (*skb)->len, new_size);

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->info.seq[dir];
	other_way = &nat->info.seq[!dir];

	DEBUGP("nf_nat_resize_packet: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	/* SYN adjust. If it's uninitialized, or this is after last
	 * correction, record it: we don't handle more than one
	 * adjustment in the window, but do deal with common case of a
	 * retransmit */
	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		this_way->correction_pos = seq;
		this_way->offset_before = this_way->offset_after;
		this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	DEBUGP("nf_nat_resize_packet: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}
/* Get the offset value, for conntrack */
s16 nf_nat_get_offset(const struct nf_conn *ct,
		      enum ip_conntrack_dir dir,
		      u32 seq)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way;
	s16 offset;

	if (!nat)
		return 0;

	this_way = &nat->info.seq[dir];
	spin_lock_bh(&nf_nat_seqofs_lock);
	offset = after(seq, this_way->correction_pos)
		 ? this_way->offset_after : this_way->offset_before;
	spin_unlock_bh(&nf_nat_seqofs_lock);

	return offset;
}
EXPORT_SYMBOL_GPL(nf_nat_get_offset);
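
/* Usage sketch (hypothetical call site, not part of this file): TCP
 * conntrack can use the returned offset to map a sequence number seen
 * after NAT mangling back into the sender's original sequence space:
 *
 *	s16 off = nf_nat_get_offset(ct, CTINFO2DIR(ctinfo),
 *				    ntohl(tcph->seq));
 *	u32 orig_seq = ntohl(tcph->seq) - off;
 */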
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		DEBUGP("nf_nat_mangle_packet: Extending packet by "
		       "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		DEBUGP("nf_nat_mangle_packet: Shrinking packet from "
		       "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}
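
/* Worked example for mangle_contents() (illustrative values, not from
 * this file): given payload "abcdMATCHxyz" with match_offset=4,
 * match_len=5, rep_buffer="REPLACE", rep_len=7, the memmove() first
 * shifts the tail "xyz" two bytes right, the memcpy() then writes
 * "REPLACE" into the gap, and skb_put() grows the skb by
 * rep_len - match_len = 2, yielding "abcdREPLACExyz".
 */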
/* Unusual, but possible case. */
static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
{
	/* IPv4 tot_len is a 16-bit field, so the packet can never grow
	 * past 65535 octets. */
	if (skb->len + extra > 65535)
		return 0;

	if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
		return 0;

	return 1;
}
void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			   __be32 seq, s16 off)
{
	if (!off)
		return;
	set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
}
EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
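
/* Illustrative note: a helper that grows the payload of the packet with
 * sequence number S by 4 bytes effectively records (S, +4) here; once
 * IPS_SEQ_ADJUST_BIT is set, nf_nat_seq_adjust() below rewrites the
 * seq/ack fields of every later packet on the connection.
 */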
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 */
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_ALL_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial((char *)tcph,
								datalen, 0));
		}
	} else
		nf_proto_csum_replace2(&tcph->check, skb,
				       htons(oldlen), htons(datalen), 1);

	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
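
/* Usage sketch (hypothetical call site, modeled on the FTP helper):
 * replace an address/port string inside the payload and, with
 * adjust=true, let NAT fix up sequence numbers on following packets:
 *
 *	if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, match_off,
 *					match_len, buf, strlen(buf), true))
 *		return NF_DROP;
 */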
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 *       should be fairly easy to do.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
	    match_offset + match_len)
		return 0;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_ALL_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial((char *)udph,
								     datalen, 0));
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		nf_proto_csum_replace2(&udph->check, skb,
				       htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
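
/* Usage sketch (hypothetical call site, loosely modeled on the Amanda
 * helper): rewrite a decimal port number inside the UDP payload:
 *
 *	sprintf(buf, "%u", port);
 *	if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
 *				      match_off, match_len,
 *				      buf, strlen(buf)))
 *		return NF_DROP;
 */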
/* Adjust one found SACK option including checksum correction */
static void
sack_adjust(struct sk_buff *skb,
	    struct tcphdr *tcph,
	    unsigned int sackoff,
	    unsigned int sackend,
	    struct nf_nat_seq *natseq)
{
	while (sackoff < sackend) {
		struct tcp_sack_block_wire *sack;
		__be32 new_start_seq, new_end_seq;

		sack = (void *)skb->data + sackoff;
		if (after(ntohl(sack->start_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_after);
		else
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_before);

		if (after(ntohl(sack->end_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_after);
		else
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_before);

		DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
		       ntohl(sack->start_seq), new_start_seq,
		       ntohl(sack->end_seq), new_end_seq);

		nf_proto_csum_replace4(&tcph->check, skb,
				       sack->start_seq, new_start_seq, 0);
		nf_proto_csum_replace4(&tcph->check, skb,
				       sack->end_seq, new_end_seq, 0);
		sack->start_seq = new_start_seq;
		sack->end_seq = new_end_seq;
		sackoff += sizeof(*sack);
	}
}
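
/* Worked example (illustrative numbers): if the opposite direction was
 * grown by 4 bytes at sequence 1000 (correction_pos=1000,
 * offset_before=0, offset_after=4), a SACK block covering translated
 * sequence 1500 is mapped back to 1500 - 4 = 1496, while a block for
 * sequence 900 stays at 900 - 0 = 900.
 */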
/* TCP SACK sequence number adjustment */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1],
					    &nat->info.seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->info.seq[dir];
	other_way = &nat->info.seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	nf_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	nf_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
	       ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
	       ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
EXPORT_SYMBOL(nf_nat_seq_adjust);
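
/* Illustrative note: the IPv4 NAT hook calls this for established
 * connections once IPS_SEQ_ADJUST_BIT has been set via
 * nf_nat_set_seq_adjust() above, so every subsequent packet has its
 * seq shifted forward by seqoff and its ack shifted back by ackoff,
 * followed by the same treatment for any SACK blocks.
 */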
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
			  struct nf_conntrack_expect *exp)
{
	struct nf_nat_range range;

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	/* hook doesn't matter, but it has to do source manip */
	nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);

	/* For DST manip, map port here to where it's expected. */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = exp->saved_proto;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	/* hook doesn't matter, but it has to do destination manip */
	nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
}
EXPORT_SYMBOL(nf_nat_follow_master);
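
/* Usage sketch (hypothetical, modeled on helpers such as FTP/IRC): a
 * helper that wants an expected data connection NATed the same way as
 * its control connection registers this as the expectation callback
 * before calling nf_ct_expect_related():
 *
 *	exp->expectfn = nf_nat_follow_master;
 */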