/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/tcp.h>
#include <net/route.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
/* Debug-dump one struct nf_nat_seq: the before/after offsets and the
 * sequence position the correction applies from.  Wrapped in
 * do { } while (0) so it expands to a single statement and is safe in
 * unbraced if/else bodies; argument parenthesized against operator
 * surprises. */
#define DUMP_OFFSET(x) \
	do { \
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after, \
			 (x)->correction_pos); \
	} while (0)
/* Serializes all reads/updates of the per-direction TCP sequence-offset
 * records (struct nf_nat_seq) touched below. */
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
38 /* Setup TCP sequence correction given this change at this sequence */
40 adjust_tcp_sequence(u32 seq
,
43 enum ip_conntrack_info ctinfo
)
45 enum ip_conntrack_dir dir
= CTINFO2DIR(ctinfo
);
46 struct nf_conn_nat
*nat
= nfct_nat(ct
);
47 struct nf_nat_seq
*this_way
= &nat
->seq
[dir
];
49 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
52 pr_debug("adjust_tcp_sequence: Seq_offset before: ");
53 DUMP_OFFSET(this_way
);
55 spin_lock_bh(&nf_nat_seqofs_lock
);
57 /* SYN adjust. If it's uninitialized, or this is after last
58 * correction, record it: we don't handle more than one
59 * adjustment in the window, but do deal with common case of a
61 if (this_way
->offset_before
== this_way
->offset_after
||
62 before(this_way
->correction_pos
, seq
)) {
63 this_way
->correction_pos
= seq
;
64 this_way
->offset_before
= this_way
->offset_after
;
65 this_way
->offset_after
+= sizediff
;
67 spin_unlock_bh(&nf_nat_seqofs_lock
);
69 pr_debug("adjust_tcp_sequence: Seq_offset after: ");
70 DUMP_OFFSET(this_way
);
73 /* Get the offset value, for conntrack */
74 s16
nf_nat_get_offset(const struct nf_conn
*ct
,
75 enum ip_conntrack_dir dir
,
78 struct nf_conn_nat
*nat
= nfct_nat(ct
);
79 struct nf_nat_seq
*this_way
;
85 this_way
= &nat
->seq
[dir
];
86 spin_lock_bh(&nf_nat_seqofs_lock
);
87 offset
= after(seq
, this_way
->correction_pos
)
88 ? this_way
->offset_after
: this_way
->offset_before
;
89 spin_unlock_bh(&nf_nat_seqofs_lock
);
93 EXPORT_SYMBOL_GPL(nf_nat_get_offset
);
95 /* Frobs data inside this packet, which is linear. */
96 static void mangle_contents(struct sk_buff
*skb
,
98 unsigned int match_offset
,
99 unsigned int match_len
,
100 const char *rep_buffer
,
101 unsigned int rep_len
)
105 BUG_ON(skb_is_nonlinear(skb
));
106 data
= skb_network_header(skb
) + dataoff
;
108 /* move post-replacement */
109 memmove(data
+ match_offset
+ rep_len
,
110 data
+ match_offset
+ match_len
,
111 skb
->tail
- (skb
->network_header
+ dataoff
+
112 match_offset
+ match_len
));
114 /* insert data from buffer */
115 memcpy(data
+ match_offset
, rep_buffer
, rep_len
);
117 /* update skb info */
118 if (rep_len
> match_len
) {
119 pr_debug("nf_nat_mangle_packet: Extending packet by "
120 "%u from %u bytes\n", rep_len
- match_len
, skb
->len
);
121 skb_put(skb
, rep_len
- match_len
);
123 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
124 "%u from %u bytes\n", match_len
- rep_len
, skb
->len
);
125 __skb_trim(skb
, skb
->len
+ rep_len
- match_len
);
128 /* fix IP hdr checksum information */
129 ip_hdr(skb
)->tot_len
= htons(skb
->len
);
130 ip_send_check(ip_hdr(skb
));
133 /* Unusual, but possible case. */
134 static int enlarge_skb(struct sk_buff
*skb
, unsigned int extra
)
136 if (skb
->len
+ extra
> 65535)
139 if (pskb_expand_head(skb
, 0, extra
- skb_tailroom(skb
), GFP_ATOMIC
))
145 void nf_nat_set_seq_adjust(struct nf_conn
*ct
, enum ip_conntrack_info ctinfo
,
150 set_bit(IPS_SEQ_ADJUST_BIT
, &ct
->status
);
151 adjust_tcp_sequence(ntohl(seq
), off
, ct
, ctinfo
);
152 nf_conntrack_event_cache(IPCT_NATSEQADJ
, ct
);
154 EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust
);
156 int __nf_nat_mangle_tcp_packet(struct sk_buff
*skb
,
158 enum ip_conntrack_info ctinfo
,
159 unsigned int match_offset
,
160 unsigned int match_len
,
161 const char *rep_buffer
,
162 unsigned int rep_len
, bool adjust
)
164 struct rtable
*rt
= skb_rtable(skb
);
169 if (!skb_make_writable(skb
, skb
->len
))
172 if (rep_len
> match_len
&&
173 rep_len
- match_len
> skb_tailroom(skb
) &&
174 !enlarge_skb(skb
, rep_len
- match_len
))
177 SKB_LINEAR_ASSERT(skb
);
180 tcph
= (void *)iph
+ iph
->ihl
*4;
182 oldlen
= skb
->len
- iph
->ihl
*4;
183 mangle_contents(skb
, iph
->ihl
*4 + tcph
->doff
*4,
184 match_offset
, match_len
, rep_buffer
, rep_len
);
186 datalen
= skb
->len
- iph
->ihl
*4;
187 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
188 if (!(rt
->rt_flags
& RTCF_LOCAL
) &&
189 skb
->dev
->features
& NETIF_F_V4_CSUM
) {
190 skb
->ip_summed
= CHECKSUM_PARTIAL
;
191 skb
->csum_start
= skb_headroom(skb
) +
192 skb_network_offset(skb
) +
194 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
195 tcph
->check
= ~tcp_v4_check(datalen
,
196 iph
->saddr
, iph
->daddr
, 0);
199 tcph
->check
= tcp_v4_check(datalen
,
200 iph
->saddr
, iph
->daddr
,
205 inet_proto_csum_replace2(&tcph
->check
, skb
,
206 htons(oldlen
), htons(datalen
), 1);
208 if (adjust
&& rep_len
!= match_len
)
209 nf_nat_set_seq_adjust(ct
, ctinfo
, tcph
->seq
,
210 (int)rep_len
- (int)match_len
);
214 EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet
);
217 nf_nat_mangle_udp_packet(struct sk_buff
*skb
,
219 enum ip_conntrack_info ctinfo
,
220 unsigned int match_offset
,
221 unsigned int match_len
,
222 const char *rep_buffer
,
223 unsigned int rep_len
)
225 struct rtable
*rt
= skb_rtable(skb
);
230 /* UDP helpers might accidentally mangle the wrong packet */
232 if (skb
->len
< iph
->ihl
*4 + sizeof(*udph
) +
233 match_offset
+ match_len
)
236 if (!skb_make_writable(skb
, skb
->len
))
239 if (rep_len
> match_len
&&
240 rep_len
- match_len
> skb_tailroom(skb
) &&
241 !enlarge_skb(skb
, rep_len
- match_len
))
245 udph
= (void *)iph
+ iph
->ihl
*4;
247 oldlen
= skb
->len
- iph
->ihl
*4;
248 mangle_contents(skb
, iph
->ihl
*4 + sizeof(*udph
),
249 match_offset
, match_len
, rep_buffer
, rep_len
);
251 /* update the length of the UDP packet */
252 datalen
= skb
->len
- iph
->ihl
*4;
253 udph
->len
= htons(datalen
);
255 /* fix udp checksum if udp checksum was previously calculated */
256 if (!udph
->check
&& skb
->ip_summed
!= CHECKSUM_PARTIAL
)
259 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
260 if (!(rt
->rt_flags
& RTCF_LOCAL
) &&
261 skb
->dev
->features
& NETIF_F_V4_CSUM
) {
262 skb
->ip_summed
= CHECKSUM_PARTIAL
;
263 skb
->csum_start
= skb_headroom(skb
) +
264 skb_network_offset(skb
) +
266 skb
->csum_offset
= offsetof(struct udphdr
, check
);
267 udph
->check
= ~csum_tcpudp_magic(iph
->saddr
, iph
->daddr
,
268 datalen
, IPPROTO_UDP
,
272 udph
->check
= csum_tcpudp_magic(iph
->saddr
, iph
->daddr
,
273 datalen
, IPPROTO_UDP
,
277 udph
->check
= CSUM_MANGLED_0
;
280 inet_proto_csum_replace2(&udph
->check
, skb
,
281 htons(oldlen
), htons(datalen
), 1);
285 EXPORT_SYMBOL(nf_nat_mangle_udp_packet
);
287 /* Adjust one found SACK option including checksum correction */
289 sack_adjust(struct sk_buff
*skb
,
291 unsigned int sackoff
,
292 unsigned int sackend
,
293 struct nf_nat_seq
*natseq
)
295 while (sackoff
< sackend
) {
296 struct tcp_sack_block_wire
*sack
;
297 __be32 new_start_seq
, new_end_seq
;
299 sack
= (void *)skb
->data
+ sackoff
;
300 if (after(ntohl(sack
->start_seq
) - natseq
->offset_before
,
301 natseq
->correction_pos
))
302 new_start_seq
= htonl(ntohl(sack
->start_seq
)
303 - natseq
->offset_after
);
305 new_start_seq
= htonl(ntohl(sack
->start_seq
)
306 - natseq
->offset_before
);
308 if (after(ntohl(sack
->end_seq
) - natseq
->offset_before
,
309 natseq
->correction_pos
))
310 new_end_seq
= htonl(ntohl(sack
->end_seq
)
311 - natseq
->offset_after
);
313 new_end_seq
= htonl(ntohl(sack
->end_seq
)
314 - natseq
->offset_before
);
316 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
317 ntohl(sack
->start_seq
), new_start_seq
,
318 ntohl(sack
->end_seq
), new_end_seq
);
320 inet_proto_csum_replace4(&tcph
->check
, skb
,
321 sack
->start_seq
, new_start_seq
, 0);
322 inet_proto_csum_replace4(&tcph
->check
, skb
,
323 sack
->end_seq
, new_end_seq
, 0);
324 sack
->start_seq
= new_start_seq
;
325 sack
->end_seq
= new_end_seq
;
326 sackoff
+= sizeof(*sack
);
330 /* TCP SACK sequence number adjustment */
331 static inline unsigned int
332 nf_nat_sack_adjust(struct sk_buff
*skb
,
335 enum ip_conntrack_info ctinfo
)
337 unsigned int dir
, optoff
, optend
;
338 struct nf_conn_nat
*nat
= nfct_nat(ct
);
340 optoff
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
341 optend
= ip_hdrlen(skb
) + tcph
->doff
* 4;
343 if (!skb_make_writable(skb
, optend
))
346 dir
= CTINFO2DIR(ctinfo
);
348 while (optoff
< optend
) {
349 /* Usually: option, length. */
350 unsigned char *op
= skb
->data
+ optoff
;
359 /* no partial options */
360 if (optoff
+ 1 == optend
||
361 optoff
+ op
[1] > optend
||
364 if (op
[0] == TCPOPT_SACK
&&
365 op
[1] >= 2+TCPOLEN_SACK_PERBLOCK
&&
366 ((op
[1] - 2) % TCPOLEN_SACK_PERBLOCK
) == 0)
367 sack_adjust(skb
, tcph
, optoff
+2,
368 optoff
+op
[1], &nat
->seq
[!dir
]);
375 /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
377 nf_nat_seq_adjust(struct sk_buff
*skb
,
379 enum ip_conntrack_info ctinfo
)
383 __be32 newseq
, newack
;
385 struct nf_conn_nat
*nat
= nfct_nat(ct
);
386 struct nf_nat_seq
*this_way
, *other_way
;
388 dir
= CTINFO2DIR(ctinfo
);
390 this_way
= &nat
->seq
[dir
];
391 other_way
= &nat
->seq
[!dir
];
393 if (!skb_make_writable(skb
, ip_hdrlen(skb
) + sizeof(*tcph
)))
396 tcph
= (void *)skb
->data
+ ip_hdrlen(skb
);
397 if (after(ntohl(tcph
->seq
), this_way
->correction_pos
))
398 seqoff
= this_way
->offset_after
;
400 seqoff
= this_way
->offset_before
;
402 if (after(ntohl(tcph
->ack_seq
) - other_way
->offset_before
,
403 other_way
->correction_pos
))
404 ackoff
= other_way
->offset_after
;
406 ackoff
= other_way
->offset_before
;
408 newseq
= htonl(ntohl(tcph
->seq
) + seqoff
);
409 newack
= htonl(ntohl(tcph
->ack_seq
) - ackoff
);
411 inet_proto_csum_replace4(&tcph
->check
, skb
, tcph
->seq
, newseq
, 0);
412 inet_proto_csum_replace4(&tcph
->check
, skb
, tcph
->ack_seq
, newack
, 0);
414 pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
415 ntohl(tcph
->seq
), ntohl(newseq
), ntohl(tcph
->ack_seq
),
419 tcph
->ack_seq
= newack
;
421 return nf_nat_sack_adjust(skb
, tcph
, ct
, ctinfo
);
424 /* Setup NAT on this expected conntrack so it follows master. */
425 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
426 void nf_nat_follow_master(struct nf_conn
*ct
,
427 struct nf_conntrack_expect
*exp
)
429 struct nf_nat_range range
;
431 /* This must be a fresh one. */
432 BUG_ON(ct
->status
& IPS_NAT_DONE_MASK
);
434 /* Change src to where master sends to */
435 range
.flags
= IP_NAT_RANGE_MAP_IPS
;
436 range
.min_ip
= range
.max_ip
437 = ct
->master
->tuplehash
[!exp
->dir
].tuple
.dst
.u3
.ip
;
438 nf_nat_setup_info(ct
, &range
, IP_NAT_MANIP_SRC
);
440 /* For DST manip, map port here to where it's expected. */
441 range
.flags
= (IP_NAT_RANGE_MAP_IPS
| IP_NAT_RANGE_PROTO_SPECIFIED
);
442 range
.min
= range
.max
= exp
->saved_proto
;
443 range
.min_ip
= range
.max_ip
444 = ct
->master
->tuplehash
[!exp
->dir
].tuple
.src
.u3
.ip
;
445 nf_nat_setup_info(ct
, &range
, IP_NAT_MANIP_DST
);
447 EXPORT_SYMBOL(nf_nat_follow_master
);