/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

MODULE_LICENSE("GPL");

static char __initdata version[] = "0.93";

static inline int
ctnetlink_dump_tuples_proto(struct sk_buff *skb,
                            const struct nf_conntrack_tuple *tuple,
                            struct nf_conntrack_l4proto *l4proto)
{
        int ret = 0;
        struct nlattr *nest_parms;

        nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;
        if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
                goto nla_put_failure;

        if (likely(l4proto->tuple_to_nlattr))
                ret = l4proto->tuple_to_nlattr(skb, tuple);

        nla_nest_end(skb, nest_parms);

        return ret;

nla_put_failure:
        return -1;
}

static inline int
ctnetlink_dump_tuples_ip(struct sk_buff *skb,
                         const struct nf_conntrack_tuple *tuple,
                         struct nf_conntrack_l3proto *l3proto)
{
        int ret = 0;
        struct nlattr *nest_parms;

        nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
        if (!nest_parms)
                goto nla_put_failure;

        if (likely(l3proto->tuple_to_nlattr))
                ret = l3proto->tuple_to_nlattr(skb, tuple);

        nla_nest_end(skb, nest_parms);

        return ret;

nla_put_failure:
        return -1;
}

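/* Dump both the IP and the protocol part of a tuple under RCU protection,
 * using the l3/l4 protocol handlers registered for the tuple's family. */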
static int
ctnetlink_dump_tuples(struct sk_buff *skb,
                      const struct nf_conntrack_tuple *tuple)
{
        int ret;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;

        rcu_read_lock();
        l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
        ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);

        if (ret >= 0) {
                l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
                                               tuple->dst.protonum);
                ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
        }
        rcu_read_unlock();
        return ret;
}

static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
        if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
        long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;

        if (timeout < 0)
                timeout = 0;

        if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
{
        struct nf_conntrack_l4proto *l4proto;
        struct nlattr *nest_proto;
        int ret;

        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (!l4proto->to_nlattr)
                return 0;

        nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
        if (!nest_proto)
                goto nla_put_failure;

        ret = l4proto->to_nlattr(skb, nest_proto, ct);

        nla_nest_end(skb, nest_proto);

        return ret;

nla_put_failure:
        return -1;
}

static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
        struct nlattr *nest_helper;
        const struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_helper *helper;

        if (!help)
                return 0;

        helper = rcu_dereference(help->helper);
        if (!helper)
                goto out;

        nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
        if (!nest_helper)
                goto nla_put_failure;
        if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
                goto nla_put_failure;

        if (helper->to_nlattr)
                helper->to_nlattr(skb, ct);

        nla_nest_end(skb, nest_helper);
out:
        return 0;

nla_put_failure:
        return -1;
}

static int
dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
              enum ip_conntrack_dir dir)
{
        enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
        struct nlattr *nest_count;

        nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
        if (!nest_count)
                goto nla_put_failure;

        if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
            nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
                goto nla_put_failure;

        nla_nest_end(skb, nest_count);

        return 0;

nla_put_failure:
        return -1;
}

static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
                        enum ip_conntrack_dir dir, int type)
{
        struct nf_conn_counter *acct;
        u64 pkts, bytes;

        acct = nf_conn_acct_find(ct);
        if (!acct)
                return 0;

        if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
                pkts = atomic64_xchg(&acct[dir].packets, 0);
                bytes = atomic64_xchg(&acct[dir].bytes, 0);
        } else {
                pkts = atomic64_read(&acct[dir].packets);
                bytes = atomic64_read(&acct[dir].bytes);
        }
        return dump_counters(skb, pkts, bytes, dir);
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
        struct nlattr *nest_count;
        const struct nf_conn_tstamp *tstamp;

        tstamp = nf_conn_tstamp_find(ct);
        if (!tstamp)
                return 0;

        nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
        if (!nest_count)
                goto nla_put_failure;

        if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
            (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
                                               cpu_to_be64(tstamp->stop))))
                goto nla_put_failure;
        nla_nest_end(skb, nest_count);

        return 0;

nla_put_failure:
        return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
        if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static inline int
ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
        struct nlattr *nest_secctx;
        int len, ret;
        char *secctx;

        ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
        if (ret)
                return 0;

        ret = -1;
        nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
        if (!nest_secctx)
                goto nla_put_failure;

        if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
                goto nla_put_failure;
        nla_nest_end(skb, nest_secctx);

        ret = 0;
nla_put_failure:
        security_release_secctx(secctx, len);
        return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_LABELS
static int ctnetlink_label_size(const struct nf_conn *ct)
{
        struct nf_conn_labels *labels = nf_ct_labels_find(ct);

        if (!labels)
                return 0;
        return nla_total_size(labels->words * sizeof(long));
}

static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
        struct nf_conn_labels *labels = nf_ct_labels_find(ct);
        unsigned int len, i;

        if (!labels)
                return 0;

        len = labels->words * sizeof(long);
        i = 0;
        do {
                if (labels->bits[i] != 0)
                        return nla_put(skb, CTA_LABELS, len, labels->bits);
                i++;
        } while (i < labels->words);

        return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a) (0)
#endif

361 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
363 static inline int
364 ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
366 struct nlattr *nest_parms;
368 if (!(ct->status & IPS_EXPECTED))
369 return 0;
371 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
372 if (!nest_parms)
373 goto nla_put_failure;
374 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
375 goto nla_put_failure;
376 nla_nest_end(skb, nest_parms);
378 return 0;
380 nla_put_failure:
381 return -1;
384 #ifdef CONFIG_NF_NAT_NEEDED
385 static int
386 dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
388 struct nlattr *nest_parms;
390 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
391 if (!nest_parms)
392 goto nla_put_failure;
394 if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
395 htonl(natseq->correction_pos)) ||
396 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
397 htonl(natseq->offset_before)) ||
398 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
399 htonl(natseq->offset_after)))
400 goto nla_put_failure;
402 nla_nest_end(skb, nest_parms);
404 return 0;
406 nla_put_failure:
407 return -1;
410 static inline int
411 ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
413 struct nf_nat_seq *natseq;
414 struct nf_conn_nat *nat = nfct_nat(ct);
416 if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
417 return 0;
419 natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
420 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
421 return -1;
423 natseq = &nat->seq[IP_CT_DIR_REPLY];
424 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
425 return -1;
427 return 0;
429 #else
430 #define ctnetlink_dump_nat_seq_adj(a, b) (0)
431 #endif
433 static inline int
434 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
436 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
437 goto nla_put_failure;
438 return 0;
440 nla_put_failure:
441 return -1;
444 static inline int
445 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
447 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
448 goto nla_put_failure;
449 return 0;
451 nla_put_failure:
452 return -1;
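/* Fill one complete conntrack entry (tuples, status, counters, timeout,
 * protocol info, helper, mark, secctx, labels, master, NAT seq adj) into a
 * netlink message; used by both table dumps and single GET replies. */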
455 static int
456 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
457 struct nf_conn *ct)
459 struct nlmsghdr *nlh;
460 struct nfgenmsg *nfmsg;
461 struct nlattr *nest_parms;
462 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
464 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
465 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
466 if (nlh == NULL)
467 goto nlmsg_failure;
469 nfmsg = nlmsg_data(nlh);
470 nfmsg->nfgen_family = nf_ct_l3num(ct);
471 nfmsg->version = NFNETLINK_V0;
472 nfmsg->res_id = 0;
474 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
475 if (!nest_parms)
476 goto nla_put_failure;
477 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
478 goto nla_put_failure;
479 nla_nest_end(skb, nest_parms);
481 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
482 if (!nest_parms)
483 goto nla_put_failure;
484 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
485 goto nla_put_failure;
486 nla_nest_end(skb, nest_parms);
488 if (nf_ct_zone(ct) &&
489 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
490 goto nla_put_failure;
492 if (ctnetlink_dump_status(skb, ct) < 0 ||
493 ctnetlink_dump_timeout(skb, ct) < 0 ||
494 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
495 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
496 ctnetlink_dump_timestamp(skb, ct) < 0 ||
497 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
498 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
499 ctnetlink_dump_mark(skb, ct) < 0 ||
500 ctnetlink_dump_secctx(skb, ct) < 0 ||
501 ctnetlink_dump_labels(skb, ct) < 0 ||
502 ctnetlink_dump_id(skb, ct) < 0 ||
503 ctnetlink_dump_use(skb, ct) < 0 ||
504 ctnetlink_dump_master(skb, ct) < 0 ||
505 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
506 goto nla_put_failure;
508 nlmsg_end(skb, nlh);
509 return skb->len;
511 nlmsg_failure:
512 nla_put_failure:
513 nlmsg_cancel(skb, nlh);
514 return -1;
517 static inline size_t
518 ctnetlink_proto_size(const struct nf_conn *ct)
520 struct nf_conntrack_l3proto *l3proto;
521 struct nf_conntrack_l4proto *l4proto;
522 size_t len = 0;
524 rcu_read_lock();
525 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
526 len += l3proto->nla_size;
528 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
529 len += l4proto->nla_size;
530 rcu_read_unlock();
532 return len;
535 static inline size_t
536 ctnetlink_counters_size(const struct nf_conn *ct)
538 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
539 return 0;
540 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
541 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
542 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
546 static inline int
547 ctnetlink_secctx_size(const struct nf_conn *ct)
549 #ifdef CONFIG_NF_CONNTRACK_SECMARK
550 int len, ret;
552 ret = security_secid_to_secctx(ct->secmark, NULL, &len);
553 if (ret)
554 return 0;
556 return nla_total_size(0) /* CTA_SECCTX */
557 + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
558 #else
559 return 0;
560 #endif
563 static inline size_t
564 ctnetlink_timestamp_size(const struct nf_conn *ct)
566 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
567 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
568 return 0;
569 return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
570 #else
571 return 0;
572 #endif
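/* Upper bound on the netlink payload needed to dump one conntrack entry;
 * used to size the skb allocated for event notifications. */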
575 static inline size_t
576 ctnetlink_nlmsg_size(const struct nf_conn *ct)
578 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
579 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
580 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
581 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
582 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
583 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
584 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
585 + ctnetlink_counters_size(ct)
586 + ctnetlink_timestamp_size(ct)
587 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
588 + nla_total_size(0) /* CTA_PROTOINFO */
589 + nla_total_size(0) /* CTA_HELP */
590 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
591 + ctnetlink_secctx_size(ct)
592 #ifdef CONFIG_NF_NAT_NEEDED
593 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
594 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
595 #endif
596 #ifdef CONFIG_NF_CONNTRACK_MARK
597 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
598 #endif
599 + ctnetlink_proto_size(ct)
600 + ctnetlink_label_size(ct)
604 #ifdef CONFIG_NF_CONNTRACK_EVENTS
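/* Conntrack event callback: translate IPCT_* events into netlink
 * notifications on the NEW/UPDATE/DESTROY multicast groups. */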
605 static int
606 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
608 struct net *net;
609 struct nlmsghdr *nlh;
610 struct nfgenmsg *nfmsg;
611 struct nlattr *nest_parms;
612 struct nf_conn *ct = item->ct;
613 struct sk_buff *skb;
614 unsigned int type;
615 unsigned int flags = 0, group;
616 int err;
618 /* ignore our fake conntrack entry */
619 if (nf_ct_is_untracked(ct))
620 return 0;
622 if (events & (1 << IPCT_DESTROY)) {
623 type = IPCTNL_MSG_CT_DELETE;
624 group = NFNLGRP_CONNTRACK_DESTROY;
625 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
626 type = IPCTNL_MSG_CT_NEW;
627 flags = NLM_F_CREATE|NLM_F_EXCL;
628 group = NFNLGRP_CONNTRACK_NEW;
629 } else if (events) {
630 type = IPCTNL_MSG_CT_NEW;
631 group = NFNLGRP_CONNTRACK_UPDATE;
632 } else
633 return 0;
635 net = nf_ct_net(ct);
636 if (!item->report && !nfnetlink_has_listeners(net, group))
637 return 0;
639 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
640 if (skb == NULL)
641 goto errout;
643 type |= NFNL_SUBSYS_CTNETLINK << 8;
644 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
645 if (nlh == NULL)
646 goto nlmsg_failure;
648 nfmsg = nlmsg_data(nlh);
649 nfmsg->nfgen_family = nf_ct_l3num(ct);
650 nfmsg->version = NFNETLINK_V0;
651 nfmsg->res_id = 0;
653 rcu_read_lock();
654 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
655 if (!nest_parms)
656 goto nla_put_failure;
657 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
658 goto nla_put_failure;
659 nla_nest_end(skb, nest_parms);
661 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
662 if (!nest_parms)
663 goto nla_put_failure;
664 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
665 goto nla_put_failure;
666 nla_nest_end(skb, nest_parms);
668 if (nf_ct_zone(ct) &&
669 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
670 goto nla_put_failure;
672 if (ctnetlink_dump_id(skb, ct) < 0)
673 goto nla_put_failure;
675 if (ctnetlink_dump_status(skb, ct) < 0)
676 goto nla_put_failure;
678 if (events & (1 << IPCT_DESTROY)) {
679 if (ctnetlink_dump_counters(skb, ct,
680 IP_CT_DIR_ORIGINAL, type) < 0 ||
681 ctnetlink_dump_counters(skb, ct,
682 IP_CT_DIR_REPLY, type) < 0 ||
683 ctnetlink_dump_timestamp(skb, ct) < 0)
684 goto nla_put_failure;
685 } else {
686 if (ctnetlink_dump_timeout(skb, ct) < 0)
687 goto nla_put_failure;
689 if (events & (1 << IPCT_PROTOINFO)
690 && ctnetlink_dump_protoinfo(skb, ct) < 0)
691 goto nla_put_failure;
693 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
694 && ctnetlink_dump_helpinfo(skb, ct) < 0)
695 goto nla_put_failure;
697 #ifdef CONFIG_NF_CONNTRACK_SECMARK
698 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
699 && ctnetlink_dump_secctx(skb, ct) < 0)
700 goto nla_put_failure;
701 #endif
702 if (events & (1 << IPCT_LABEL) &&
703 ctnetlink_dump_labels(skb, ct) < 0)
704 goto nla_put_failure;
706 if (events & (1 << IPCT_RELATED) &&
707 ctnetlink_dump_master(skb, ct) < 0)
708 goto nla_put_failure;
710 if (events & (1 << IPCT_NATSEQADJ) &&
711 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
712 goto nla_put_failure;
715 #ifdef CONFIG_NF_CONNTRACK_MARK
716 if ((events & (1 << IPCT_MARK) || ct->mark)
717 && ctnetlink_dump_mark(skb, ct) < 0)
718 goto nla_put_failure;
719 #endif
720 rcu_read_unlock();
722 nlmsg_end(skb, nlh);
723 err = nfnetlink_send(skb, net, item->portid, group, item->report,
724 GFP_ATOMIC);
725 if (err == -ENOBUFS || err == -EAGAIN)
726 return -ENOBUFS;
728 return 0;
730 nla_put_failure:
731 rcu_read_unlock();
732 nlmsg_cancel(skb, nlh);
733 nlmsg_failure:
734 kfree_skb(skb);
735 errout:
736 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
737 return -ENOBUFS;
739 return 0;
741 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
743 static int ctnetlink_done(struct netlink_callback *cb)
745 if (cb->args[1])
746 nf_ct_put((struct nf_conn *)cb->args[1]);
747 if (cb->data)
748 kfree(cb->data);
749 return 0;
752 struct ctnetlink_dump_filter {
753 struct {
754 u_int32_t val;
755 u_int32_t mask;
756 } mark;
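/* Walk the conntrack hash table for a dump request, restarting from the
 * last dumped entry and optionally filtering on mark/mask. */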
759 static int
760 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
762 struct net *net = sock_net(skb->sk);
763 struct nf_conn *ct, *last;
764 struct nf_conntrack_tuple_hash *h;
765 struct hlist_nulls_node *n;
766 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
767 u_int8_t l3proto = nfmsg->nfgen_family;
768 int res;
769 #ifdef CONFIG_NF_CONNTRACK_MARK
770 const struct ctnetlink_dump_filter *filter = cb->data;
771 #endif
773 spin_lock_bh(&nf_conntrack_lock);
774 last = (struct nf_conn *)cb->args[1];
775 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
776 restart:
777 hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
778 hnnode) {
779 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
780 continue;
781 ct = nf_ct_tuplehash_to_ctrack(h);
782 /* Dump entries of a given L3 protocol number.
783 * If it is not specified, ie. l3proto == 0,
784 * then dump everything. */
785 if (l3proto && nf_ct_l3num(ct) != l3proto)
786 continue;
787 if (cb->args[1]) {
788 if (ct != last)
789 continue;
790 cb->args[1] = 0;
792 #ifdef CONFIG_NF_CONNTRACK_MARK
793 if (filter && !((ct->mark & filter->mark.mask) ==
794 filter->mark.val)) {
795 continue;
797 #endif
798 rcu_read_lock();
799 res =
800 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
801 cb->nlh->nlmsg_seq,
802 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
803 ct);
804 rcu_read_unlock();
805 if (res < 0) {
806 nf_conntrack_get(&ct->ct_general);
807 cb->args[1] = (unsigned long)ct;
808 goto out;
811 if (cb->args[1]) {
812 cb->args[1] = 0;
813 goto restart;
816 out:
817 spin_unlock_bh(&nf_conntrack_lock);
818 if (last)
819 nf_ct_put(last);
821 return skb->len;
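/* Parse the CTA_TUPLE_IP nested attribute via the l3 protocol handler. */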
824 static inline int
825 ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
827 struct nlattr *tb[CTA_IP_MAX+1];
828 struct nf_conntrack_l3proto *l3proto;
829 int ret = 0;
831 ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
832 if (ret < 0)
833 return ret;
835 rcu_read_lock();
836 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
838 if (likely(l3proto->nlattr_to_tuple)) {
839 ret = nla_validate_nested(attr, CTA_IP_MAX,
840 l3proto->nla_policy);
841 if (ret == 0)
842 ret = l3proto->nlattr_to_tuple(tb, tuple);
845 rcu_read_unlock();
847 return ret;
850 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
851 [CTA_PROTO_NUM] = { .type = NLA_U8 },
854 static inline int
855 ctnetlink_parse_tuple_proto(struct nlattr *attr,
856 struct nf_conntrack_tuple *tuple)
858 struct nlattr *tb[CTA_PROTO_MAX+1];
859 struct nf_conntrack_l4proto *l4proto;
860 int ret = 0;
862 ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
863 if (ret < 0)
864 return ret;
866 if (!tb[CTA_PROTO_NUM])
867 return -EINVAL;
868 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
870 rcu_read_lock();
871 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
873 if (likely(l4proto->nlattr_to_tuple)) {
874 ret = nla_validate_nested(attr, CTA_PROTO_MAX,
875 l4proto->nla_policy);
876 if (ret == 0)
877 ret = l4proto->nlattr_to_tuple(tb, tuple);
880 rcu_read_unlock();
882 return ret;
885 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
886 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
887 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
890 static int
891 ctnetlink_parse_tuple(const struct nlattr * const cda[],
892 struct nf_conntrack_tuple *tuple,
893 enum ctattr_type type, u_int8_t l3num)
895 struct nlattr *tb[CTA_TUPLE_MAX+1];
896 int err;
898 memset(tuple, 0, sizeof(*tuple));
900 err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
901 if (err < 0)
902 return err;
904 if (!tb[CTA_TUPLE_IP])
905 return -EINVAL;
907 tuple->src.l3num = l3num;
909 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
910 if (err < 0)
911 return err;
913 if (!tb[CTA_TUPLE_PROTO])
914 return -EINVAL;
916 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
917 if (err < 0)
918 return err;
920 /* orig and expect tuples get DIR_ORIGINAL */
921 if (type == CTA_TUPLE_REPLY)
922 tuple->dst.dir = IP_CT_DIR_REPLY;
923 else
924 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
926 return 0;
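/* Parse an optional CTA_ZONE attribute; zones require
 * CONFIG_NF_CONNTRACK_ZONES, otherwise such a request is rejected. */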
929 static int
930 ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
932 if (attr)
933 #ifdef CONFIG_NF_CONNTRACK_ZONES
934 *zone = ntohs(nla_get_be16(attr));
935 #else
936 return -EOPNOTSUPP;
937 #endif
938 else
939 *zone = 0;
941 return 0;
944 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
945 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
946 .len = NF_CT_HELPER_NAME_LEN - 1 },
949 static inline int
950 ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
951 struct nlattr **helpinfo)
953 int err;
954 struct nlattr *tb[CTA_HELP_MAX+1];
956 err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
957 if (err < 0)
958 return err;
960 if (!tb[CTA_HELP_NAME])
961 return -EINVAL;
963 *helper_name = nla_data(tb[CTA_HELP_NAME]);
965 if (tb[CTA_HELP_INFO])
966 *helpinfo = tb[CTA_HELP_INFO];
968 return 0;
971 #define __CTA_LABELS_MAX_LENGTH ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
972 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
973 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
974 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
975 [CTA_STATUS] = { .type = NLA_U32 },
976 [CTA_PROTOINFO] = { .type = NLA_NESTED },
977 [CTA_HELP] = { .type = NLA_NESTED },
978 [CTA_NAT_SRC] = { .type = NLA_NESTED },
979 [CTA_TIMEOUT] = { .type = NLA_U32 },
980 [CTA_MARK] = { .type = NLA_U32 },
981 [CTA_ID] = { .type = NLA_U32 },
982 [CTA_NAT_DST] = { .type = NLA_NESTED },
983 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
984 [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
985 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
986 [CTA_ZONE] = { .type = NLA_U16 },
987 [CTA_MARK_MASK] = { .type = NLA_U32 },
988 [CTA_LABELS] = { .type = NLA_BINARY,
989 .len = __CTA_LABELS_MAX_LENGTH },
990 [CTA_LABELS_MASK] = { .type = NLA_BINARY,
991 .len = __CTA_LABELS_MAX_LENGTH },
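/* IPCTNL_MSG_CT_DELETE: remove the conntrack matching the given tuple
 * (and optional id), or flush the whole table if no tuple is given. */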
994 static int
995 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
996 const struct nlmsghdr *nlh,
997 const struct nlattr * const cda[])
999 struct net *net = sock_net(ctnl);
1000 struct nf_conntrack_tuple_hash *h;
1001 struct nf_conntrack_tuple tuple;
1002 struct nf_conn *ct;
1003 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1004 u_int8_t u3 = nfmsg->nfgen_family;
1005 u16 zone;
1006 int err;
1008 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1009 if (err < 0)
1010 return err;
1012 if (cda[CTA_TUPLE_ORIG])
1013 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1014 else if (cda[CTA_TUPLE_REPLY])
1015 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1016 else {
1017 /* Flush the whole table */
1018 nf_conntrack_flush_report(net,
1019 NETLINK_CB(skb).portid,
1020 nlmsg_report(nlh));
1021 return 0;
1024 if (err < 0)
1025 return err;
1027 h = nf_conntrack_find_get(net, zone, &tuple);
1028 if (!h)
1029 return -ENOENT;
1031 ct = nf_ct_tuplehash_to_ctrack(h);
1033 if (cda[CTA_ID]) {
1034 u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
1035 if (id != (u32)(unsigned long)ct) {
1036 nf_ct_put(ct);
1037 return -ENOENT;
1041 if (del_timer(&ct->timeout)) {
1042 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
1043 NETLINK_CB(skb).portid,
1044 nlmsg_report(nlh)) < 0) {
1045 nf_ct_delete_from_lists(ct);
1046 /* we failed to report the event, try later */
1047 nf_ct_dying_timeout(ct);
1048 nf_ct_put(ct);
1049 return 0;
1051 /* death_by_timeout would report the event again */
1052 set_bit(IPS_DYING_BIT, &ct->status);
1053 nf_ct_delete_from_lists(ct);
1054 nf_ct_put(ct);
1056 nf_ct_put(ct);
1058 return 0;
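/* IPCTNL_MSG_CT_GET: either start a (possibly mark-filtered) table dump
 * or look up a single entry by tuple and unicast it back. */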
1061 static int
1062 ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
1063 const struct nlmsghdr *nlh,
1064 const struct nlattr * const cda[])
1066 struct net *net = sock_net(ctnl);
1067 struct nf_conntrack_tuple_hash *h;
1068 struct nf_conntrack_tuple tuple;
1069 struct nf_conn *ct;
1070 struct sk_buff *skb2 = NULL;
1071 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1072 u_int8_t u3 = nfmsg->nfgen_family;
1073 u16 zone;
1074 int err;
1076 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1077 struct netlink_dump_control c = {
1078 .dump = ctnetlink_dump_table,
1079 .done = ctnetlink_done,
1081 #ifdef CONFIG_NF_CONNTRACK_MARK
1082 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1083 struct ctnetlink_dump_filter *filter;
1085 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1086 GFP_ATOMIC);
1087 if (filter == NULL)
1088 return -ENOMEM;
1090 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1091 filter->mark.mask =
1092 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1093 c.data = filter;
1095 #endif
1096 return netlink_dump_start(ctnl, skb, nlh, &c);
1099 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1100 if (err < 0)
1101 return err;
1103 if (cda[CTA_TUPLE_ORIG])
1104 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1105 else if (cda[CTA_TUPLE_REPLY])
1106 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1107 else
1108 return -EINVAL;
1110 if (err < 0)
1111 return err;
1113 h = nf_conntrack_find_get(net, zone, &tuple);
1114 if (!h)
1115 return -ENOENT;
1117 ct = nf_ct_tuplehash_to_ctrack(h);
1119 err = -ENOMEM;
1120 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1121 if (skb2 == NULL) {
1122 nf_ct_put(ct);
1123 return -ENOMEM;
1126 rcu_read_lock();
1127 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1128 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1129 rcu_read_unlock();
1130 nf_ct_put(ct);
1131 if (err <= 0)
1132 goto free;
1134 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1135 if (err < 0)
1136 goto out;
1138 return 0;
1140 free:
1141 kfree_skb(skb2);
1142 out:
1143 /* this avoids a loop in nfnetlink. */
1144 return err == -EAGAIN ? -ENOBUFS : err;
1147 static int ctnetlink_done_list(struct netlink_callback *cb)
1149 if (cb->args[1])
1150 nf_ct_put((struct nf_conn *)cb->args[1]);
1151 return 0;
1154 static int
1155 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb,
1156 struct hlist_nulls_head *list)
1158 struct nf_conn *ct, *last;
1159 struct nf_conntrack_tuple_hash *h;
1160 struct hlist_nulls_node *n;
1161 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1162 u_int8_t l3proto = nfmsg->nfgen_family;
1163 int res;
1165 if (cb->args[2])
1166 return 0;
1168 spin_lock_bh(&nf_conntrack_lock);
1169 last = (struct nf_conn *)cb->args[1];
1170 restart:
1171 hlist_nulls_for_each_entry(h, n, list, hnnode) {
1172 ct = nf_ct_tuplehash_to_ctrack(h);
1173 if (l3proto && nf_ct_l3num(ct) != l3proto)
1174 continue;
1175 if (cb->args[1]) {
1176 if (ct != last)
1177 continue;
1178 cb->args[1] = 0;
1180 rcu_read_lock();
1181 res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1182 cb->nlh->nlmsg_seq,
1183 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1184 ct);
1185 rcu_read_unlock();
1186 if (res < 0) {
1187 nf_conntrack_get(&ct->ct_general);
1188 cb->args[1] = (unsigned long)ct;
1189 goto out;
1192 if (cb->args[1]) {
1193 cb->args[1] = 0;
1194 goto restart;
1195 } else
1196 cb->args[2] = 1;
1197 out:
1198 spin_unlock_bh(&nf_conntrack_lock);
1199 if (last)
1200 nf_ct_put(last);
1202 return skb->len;
1205 static int
1206 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1208 struct net *net = sock_net(skb->sk);
1210 return ctnetlink_dump_list(skb, cb, &net->ct.dying);
1213 static int
1214 ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
1215 const struct nlmsghdr *nlh,
1216 const struct nlattr * const cda[])
1218 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1219 struct netlink_dump_control c = {
1220 .dump = ctnetlink_dump_dying,
1221 .done = ctnetlink_done_list,
1223 return netlink_dump_start(ctnl, skb, nlh, &c);
1226 return -EOPNOTSUPP;
1229 static int
1230 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1232 struct net *net = sock_net(skb->sk);
1234 return ctnetlink_dump_list(skb, cb, &net->ct.unconfirmed);
1237 static int
1238 ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
1239 const struct nlmsghdr *nlh,
1240 const struct nlattr * const cda[])
1242 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1243 struct netlink_dump_control c = {
1244 .dump = ctnetlink_dump_unconfirmed,
1245 .done = ctnetlink_done_list,
1247 return netlink_dump_start(ctnl, skb, nlh, &c);
1250 return -EOPNOTSUPP;
1253 #ifdef CONFIG_NF_NAT_NEEDED
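/* Hand NAT setup attributes to the NAT module, auto-loading it via
 * request_module() if it is not present yet. */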
1254 static int
1255 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1256 enum nf_nat_manip_type manip,
1257 const struct nlattr *attr)
1259 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1260 int err;
1262 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1263 if (!parse_nat_setup) {
1264 #ifdef CONFIG_MODULES
1265 rcu_read_unlock();
1266 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1267 if (request_module("nf-nat") < 0) {
1268 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1269 rcu_read_lock();
1270 return -EOPNOTSUPP;
1272 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1273 rcu_read_lock();
1274 if (nfnetlink_parse_nat_setup_hook)
1275 return -EAGAIN;
1276 #endif
1277 return -EOPNOTSUPP;
1280 err = parse_nat_setup(ct, manip, attr);
1281 if (err == -EAGAIN) {
1282 #ifdef CONFIG_MODULES
1283 rcu_read_unlock();
1284 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1285 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1286 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1287 rcu_read_lock();
1288 return -EOPNOTSUPP;
1290 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1291 rcu_read_lock();
1292 #else
1293 err = -EOPNOTSUPP;
1294 #endif
1296 return err;
1298 #endif
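/* Update ct->status from CTA_STATUS, refusing changes to bits userspace
 * must not flip directly (EXPECTED/CONFIRMED/DYING, clearing SEEN_REPLY
 * or ASSURED) and masking out the NAT bits. */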
1300 static int
1301 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1303 unsigned long d;
1304 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1305 d = ct->status ^ status;
1307 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1308 /* unchangeable */
1309 return -EBUSY;
1311 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1312 /* SEEN_REPLY bit can only be set */
1313 return -EBUSY;
1315 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1316 /* ASSURED bit can only be set */
1317 return -EBUSY;
1319 /* Be careful here, modifying NAT bits can screw up things,
1320 * so don't let users modify them directly if they don't pass
1321 * nf_nat_range. */
1322 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1323 return 0;
1326 static int
1327 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1329 #ifdef CONFIG_NF_NAT_NEEDED
1330 int ret;
1332 if (cda[CTA_NAT_DST]) {
1333 ret = ctnetlink_parse_nat_setup(ct,
1334 NF_NAT_MANIP_DST,
1335 cda[CTA_NAT_DST]);
1336 if (ret < 0)
1337 return ret;
1339 if (cda[CTA_NAT_SRC]) {
1340 ret = ctnetlink_parse_nat_setup(ct,
1341 NF_NAT_MANIP_SRC,
1342 cda[CTA_NAT_SRC]);
1343 if (ret < 0)
1344 return ret;
1346 return 0;
1347 #else
1348 return -EOPNOTSUPP;
1349 #endif
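/* Handle CTA_HELP on an existing conntrack: an empty name detaches the
 * current helper, and only the already-assigned helper's private data may
 * be updated; switching to a different helper is refused. */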
1352 static inline int
1353 ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1355 struct nf_conntrack_helper *helper;
1356 struct nf_conn_help *help = nfct_help(ct);
1357 char *helpname = NULL;
1358 struct nlattr *helpinfo = NULL;
1359 int err;
1361 /* don't change helper of sibling connections */
1362 if (ct->master)
1363 return -EBUSY;
1365 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1366 if (err < 0)
1367 return err;
1369 if (!strcmp(helpname, "")) {
1370 if (help && help->helper) {
1371 /* we had a helper before ... */
1372 nf_ct_remove_expectations(ct);
1373 RCU_INIT_POINTER(help->helper, NULL);
1376 return 0;
1379 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1380 nf_ct_protonum(ct));
1381 if (helper == NULL) {
1382 #ifdef CONFIG_MODULES
1383 spin_unlock_bh(&nf_conntrack_lock);
1385 if (request_module("nfct-helper-%s", helpname) < 0) {
1386 spin_lock_bh(&nf_conntrack_lock);
1387 return -EOPNOTSUPP;
1390 spin_lock_bh(&nf_conntrack_lock);
1391 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1392 nf_ct_protonum(ct));
1393 if (helper)
1394 return -EAGAIN;
1395 #endif
1396 return -EOPNOTSUPP;
1399 if (help) {
1400 if (help->helper == helper) {
1401 /* update private helper data if allowed. */
1402 if (helper->from_nlattr)
1403 helper->from_nlattr(helpinfo, ct);
1404 return 0;
1405 } else
1406 return -EBUSY;
1409 /* we cannot set a helper for an existing conntrack */
1410 return -EOPNOTSUPP;
1413 static inline int
1414 ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1416 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1418 if (!del_timer(&ct->timeout))
1419 return -ETIME;
1421 ct->timeout.expires = jiffies + timeout * HZ;
1422 add_timer(&ct->timeout);
1424 return 0;
1427 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1428 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1429 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1430 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1433 static inline int
1434 ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1436 const struct nlattr *attr = cda[CTA_PROTOINFO];
1437 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1438 struct nf_conntrack_l4proto *l4proto;
1439 int err = 0;
1441 err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1442 if (err < 0)
1443 return err;
1445 rcu_read_lock();
1446 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1447 if (l4proto->from_nlattr)
1448 err = l4proto->from_nlattr(tb, ct);
1449 rcu_read_unlock();
1451 return err;
1454 #ifdef CONFIG_NF_NAT_NEEDED
1455 static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1456 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1457 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1458 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1461 static inline int
1462 change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1464 int err;
1465 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1467 err = nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1468 if (err < 0)
1469 return err;
1471 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1472 return -EINVAL;
1474 natseq->correction_pos =
1475 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
1477 if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
1478 return -EINVAL;
1480 natseq->offset_before =
1481 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
1483 if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
1484 return -EINVAL;
1486 natseq->offset_after =
1487 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
1489 return 0;
1492 static int
1493 ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
1494 const struct nlattr * const cda[])
1496 int ret = 0;
1497 struct nf_conn_nat *nat = nfct_nat(ct);
1499 if (!nat)
1500 return 0;
1502 if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
1503 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
1504 cda[CTA_NAT_SEQ_ADJ_ORIG]);
1505 if (ret < 0)
1506 return ret;
1508 ct->status |= IPS_SEQ_ADJUST;
1511 if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1512 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
1513 cda[CTA_NAT_SEQ_ADJ_REPLY]);
1514 if (ret < 0)
1515 return ret;
1517 ct->status |= IPS_SEQ_ADJUST;
1520 return 0;
1522 #endif
1524 static int
1525 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1527 #ifdef CONFIG_NF_CONNTRACK_LABELS
1528 size_t len = nla_len(cda[CTA_LABELS]);
1529 const void *mask = cda[CTA_LABELS_MASK];
1531 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1532 return -EINVAL;
1534 if (mask) {
1535 if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1536 nla_len(cda[CTA_LABELS_MASK]) != len)
1537 return -EINVAL;
1538 mask = nla_data(cda[CTA_LABELS_MASK]);
1541 len /= sizeof(u32);
1543 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1544 #else
1545 return -EOPNOTSUPP;
1546 #endif
1549 static int
1550 ctnetlink_change_conntrack(struct nf_conn *ct,
1551 const struct nlattr * const cda[])
1553 int err;
1555 /* only allow NAT changes and master assignation for new conntracks */
1556 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1557 return -EOPNOTSUPP;
1559 if (cda[CTA_HELP]) {
1560 err = ctnetlink_change_helper(ct, cda);
1561 if (err < 0)
1562 return err;
1565 if (cda[CTA_TIMEOUT]) {
1566 err = ctnetlink_change_timeout(ct, cda);
1567 if (err < 0)
1568 return err;
1571 if (cda[CTA_STATUS]) {
1572 err = ctnetlink_change_status(ct, cda);
1573 if (err < 0)
1574 return err;
1577 if (cda[CTA_PROTOINFO]) {
1578 err = ctnetlink_change_protoinfo(ct, cda);
1579 if (err < 0)
1580 return err;
1583 #if defined(CONFIG_NF_CONNTRACK_MARK)
1584 if (cda[CTA_MARK])
1585 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1586 #endif
1588 #ifdef CONFIG_NF_NAT_NEEDED
1589 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1590 err = ctnetlink_change_nat_seq_adj(ct, cda);
1591 if (err < 0)
1592 return err;
1594 #endif
1595 if (cda[CTA_LABELS]) {
1596 err = ctnetlink_attach_labels(ct, cda);
1597 if (err < 0)
1598 return err;
1601 return 0;
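/* Create a new conntrack entry from netlink attributes: allocate it,
 * attach helper/NAT/extension data and insert it into the hash table. */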
1604 static struct nf_conn *
1605 ctnetlink_create_conntrack(struct net *net, u16 zone,
1606 const struct nlattr * const cda[],
1607 struct nf_conntrack_tuple *otuple,
1608 struct nf_conntrack_tuple *rtuple,
1609 u8 u3)
1611 struct nf_conn *ct;
1612 int err = -EINVAL;
1613 struct nf_conntrack_helper *helper;
1614 struct nf_conn_tstamp *tstamp;
1616 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1617 if (IS_ERR(ct))
1618 return ERR_PTR(-ENOMEM);
1620 if (!cda[CTA_TIMEOUT])
1621 goto err1;
1622 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1624 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1626 rcu_read_lock();
1627 if (cda[CTA_HELP]) {
1628 char *helpname = NULL;
1629 struct nlattr *helpinfo = NULL;
1631 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1632 if (err < 0)
1633 goto err2;
1635 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1636 nf_ct_protonum(ct));
1637 if (helper == NULL) {
1638 rcu_read_unlock();
1639 #ifdef CONFIG_MODULES
1640 if (request_module("nfct-helper-%s", helpname) < 0) {
1641 err = -EOPNOTSUPP;
1642 goto err1;
1645 rcu_read_lock();
1646 helper = __nf_conntrack_helper_find(helpname,
1647 nf_ct_l3num(ct),
1648 nf_ct_protonum(ct));
1649 if (helper) {
1650 err = -EAGAIN;
1651 goto err2;
1653 rcu_read_unlock();
1654 #endif
1655 err = -EOPNOTSUPP;
1656 goto err1;
1657 } else {
1658 struct nf_conn_help *help;
1660 help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
1661 if (help == NULL) {
1662 err = -ENOMEM;
1663 goto err2;
1665 /* set private helper data if allowed. */
1666 if (helper->from_nlattr)
1667 helper->from_nlattr(helpinfo, ct);
1669 /* not in hash table yet so not strictly necessary */
1670 RCU_INIT_POINTER(help->helper, helper);
1672 } else {
1673 /* try an implicit helper assignation */
1674 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1675 if (err < 0)
1676 goto err2;
1679 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1680 err = ctnetlink_change_nat(ct, cda);
1681 if (err < 0)
1682 goto err2;
1685 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1686 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1687 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1688 nf_ct_labels_ext_add(ct);
1690 /* we must add conntrack extensions before confirmation. */
1691 ct->status |= IPS_CONFIRMED;
1693 if (cda[CTA_STATUS]) {
1694 err = ctnetlink_change_status(ct, cda);
1695 if (err < 0)
1696 goto err2;
1699 #ifdef CONFIG_NF_NAT_NEEDED
1700 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1701 err = ctnetlink_change_nat_seq_adj(ct, cda);
1702 if (err < 0)
1703 goto err2;
1705 #endif
1707 memset(&ct->proto, 0, sizeof(ct->proto));
1708 if (cda[CTA_PROTOINFO]) {
1709 err = ctnetlink_change_protoinfo(ct, cda);
1710 if (err < 0)
1711 goto err2;
1714 #if defined(CONFIG_NF_CONNTRACK_MARK)
1715 if (cda[CTA_MARK])
1716 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1717 #endif
1719 /* setup master conntrack: this is a confirmed expectation */
1720 if (cda[CTA_TUPLE_MASTER]) {
1721 struct nf_conntrack_tuple master;
1722 struct nf_conntrack_tuple_hash *master_h;
1723 struct nf_conn *master_ct;
1725 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1726 if (err < 0)
1727 goto err2;
1729 master_h = nf_conntrack_find_get(net, zone, &master);
1730 if (master_h == NULL) {
1731 err = -ENOENT;
1732 goto err2;
1734 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1735 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1736 ct->master = master_ct;
1738 tstamp = nf_conn_tstamp_find(ct);
1739 if (tstamp)
1740 tstamp->start = ktime_to_ns(ktime_get_real());
1742 err = nf_conntrack_hash_check_insert(ct);
1743 if (err < 0)
1744 goto err2;
1746 rcu_read_unlock();
1748 return ct;
1750 err2:
1751 rcu_read_unlock();
1752 err1:
1753 nf_conntrack_free(ct);
1754 return ERR_PTR(err);
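/* IPCTNL_MSG_CT_NEW: create the conntrack if it does not exist (and
 * NLM_F_CREATE is set), or update the existing one unless NLM_F_EXCL is
 * given, reporting the resulting conntrack events. */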
1757 static int
1758 ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1759 const struct nlmsghdr *nlh,
1760 const struct nlattr * const cda[])
1762 struct net *net = sock_net(ctnl);
1763 struct nf_conntrack_tuple otuple, rtuple;
1764 struct nf_conntrack_tuple_hash *h = NULL;
1765 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1766 struct nf_conn *ct;
1767 u_int8_t u3 = nfmsg->nfgen_family;
1768 u16 zone;
1769 int err;
1771 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1772 if (err < 0)
1773 return err;
1775 if (cda[CTA_TUPLE_ORIG]) {
1776 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
1777 if (err < 0)
1778 return err;
1781 if (cda[CTA_TUPLE_REPLY]) {
1782 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
1783 if (err < 0)
1784 return err;
1787 if (cda[CTA_TUPLE_ORIG])
1788 h = nf_conntrack_find_get(net, zone, &otuple);
1789 else if (cda[CTA_TUPLE_REPLY])
1790 h = nf_conntrack_find_get(net, zone, &rtuple);
1792 if (h == NULL) {
1793 err = -ENOENT;
1794 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1795 enum ip_conntrack_events events;
1797 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1798 return -EINVAL;
1800 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1801 &rtuple, u3);
1802 if (IS_ERR(ct))
1803 return PTR_ERR(ct);
1805 err = 0;
1806 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1807 events = IPCT_RELATED;
1808 else
1809 events = IPCT_NEW;
1811 if (cda[CTA_LABELS] &&
1812 ctnetlink_attach_labels(ct, cda) == 0)
1813 events |= (1 << IPCT_LABEL);
1815 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1816 (1 << IPCT_ASSURED) |
1817 (1 << IPCT_HELPER) |
1818 (1 << IPCT_PROTOINFO) |
1819 (1 << IPCT_NATSEQADJ) |
1820 (1 << IPCT_MARK) | events,
1821 ct, NETLINK_CB(skb).portid,
1822 nlmsg_report(nlh));
1823 nf_ct_put(ct);
1826 return err;
1828 /* implicit 'else' */
1830 err = -EEXIST;
1831 ct = nf_ct_tuplehash_to_ctrack(h);
1832 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1833 spin_lock_bh(&nf_conntrack_lock);
1834 err = ctnetlink_change_conntrack(ct, cda);
1835 spin_unlock_bh(&nf_conntrack_lock);
1836 if (err == 0) {
1837 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1838 (1 << IPCT_ASSURED) |
1839 (1 << IPCT_HELPER) |
1840 (1 << IPCT_LABEL) |
1841 (1 << IPCT_PROTOINFO) |
1842 (1 << IPCT_NATSEQADJ) |
1843 (1 << IPCT_MARK),
1844 ct, NETLINK_CB(skb).portid,
1845 nlmsg_report(nlh));
1849 nf_ct_put(ct);
1850 return err;
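/* Per-CPU conntrack statistics are exported as one message per possible
 * CPU, with the CPU number carried in res_id. */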
1853 static int
1854 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1855 __u16 cpu, const struct ip_conntrack_stat *st)
1857 struct nlmsghdr *nlh;
1858 struct nfgenmsg *nfmsg;
1859 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1861 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1862 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1863 if (nlh == NULL)
1864 goto nlmsg_failure;
1866 nfmsg = nlmsg_data(nlh);
1867 nfmsg->nfgen_family = AF_UNSPEC;
1868 nfmsg->version = NFNETLINK_V0;
1869 nfmsg->res_id = htons(cpu);
1871 if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
1872 nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
1873 nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
1874 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
1875 nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
1876 nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
1877 nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
1878 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
1879 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
1880 htonl(st->insert_failed)) ||
1881 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
1882 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
1883 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
1884 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
1885 htonl(st->search_restart)))
1886 goto nla_put_failure;
1888 nlmsg_end(skb, nlh);
1889 return skb->len;
1891 nla_put_failure:
1892 nlmsg_failure:
1893 nlmsg_cancel(skb, nlh);
1894 return -1;
1897 static int
1898 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1900 int cpu;
1901 struct net *net = sock_net(skb->sk);
1903 if (cb->args[0] == nr_cpu_ids)
1904 return 0;
1906 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1907 const struct ip_conntrack_stat *st;
1909 if (!cpu_possible(cpu))
1910 continue;
1912 st = per_cpu_ptr(net->ct.stat, cpu);
1913 if (ctnetlink_ct_stat_cpu_fill_info(skb,
1914 NETLINK_CB(cb->skb).portid,
1915 cb->nlh->nlmsg_seq,
1916 cpu, st) < 0)
1917 break;
1919 cb->args[0] = cpu;
1921 return skb->len;
1924 static int
1925 ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1926 const struct nlmsghdr *nlh,
1927 const struct nlattr * const cda[])
1929 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1930 struct netlink_dump_control c = {
1931 .dump = ctnetlink_ct_stat_cpu_dump,
1933 return netlink_dump_start(ctnl, skb, nlh, &c);
1936 return 0;
1939 static int
1940 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
1941 struct net *net)
1943 struct nlmsghdr *nlh;
1944 struct nfgenmsg *nfmsg;
1945 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1946 unsigned int nr_conntracks = atomic_read(&net->ct.count);
1948 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1949 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1950 if (nlh == NULL)
1951 goto nlmsg_failure;
1953 nfmsg = nlmsg_data(nlh);
1954 nfmsg->nfgen_family = AF_UNSPEC;
1955 nfmsg->version = NFNETLINK_V0;
1956 nfmsg->res_id = 0;
1958 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
1959 goto nla_put_failure;
1961 nlmsg_end(skb, nlh);
1962 return skb->len;
1964 nla_put_failure:
1965 nlmsg_failure:
1966 nlmsg_cancel(skb, nlh);
1967 return -1;
1970 static int
1971 ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
1972 const struct nlmsghdr *nlh,
1973 const struct nlattr * const cda[])
1975 struct sk_buff *skb2;
1976 int err;
1978 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1979 if (skb2 == NULL)
1980 return -ENOMEM;
1982 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
1983 nlh->nlmsg_seq,
1984 NFNL_MSG_TYPE(nlh->nlmsg_type),
1985 sock_net(skb->sk));
1986 if (err <= 0)
1987 goto free;
1989 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1990 if (err < 0)
1991 goto out;
1993 return 0;
1995 free:
1996 kfree_skb(skb2);
1997 out:
1998 /* this avoids a loop in nfnetlink. */
1999 return err == -EAGAIN ? -ENOBUFS : err;
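/* Glue for nfnetlink_queue: build and parse conntrack attributes attached
 * to queued packets (registered below via struct nfq_ct_hook). */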
2002 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2003 static size_t
2004 ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
2006 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2007 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2008 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2009 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2010 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2011 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2012 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2013 + nla_total_size(0) /* CTA_PROTOINFO */
2014 + nla_total_size(0) /* CTA_HELP */
2015 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2016 + ctnetlink_secctx_size(ct)
2017 #ifdef CONFIG_NF_NAT_NEEDED
2018 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2019 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2020 #endif
2021 #ifdef CONFIG_NF_CONNTRACK_MARK
2022 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2023 #endif
2024 + ctnetlink_proto_size(ct)
2028 static int
2029 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
2031 struct nlattr *nest_parms;
2033 rcu_read_lock();
2034 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
2035 if (!nest_parms)
2036 goto nla_put_failure;
2037 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2038 goto nla_put_failure;
2039 nla_nest_end(skb, nest_parms);
2041 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
2042 if (!nest_parms)
2043 goto nla_put_failure;
2044 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2045 goto nla_put_failure;
2046 nla_nest_end(skb, nest_parms);
2048 if (nf_ct_zone(ct)) {
2049 if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
2050 goto nla_put_failure;
2053 if (ctnetlink_dump_id(skb, ct) < 0)
2054 goto nla_put_failure;
2056 if (ctnetlink_dump_status(skb, ct) < 0)
2057 goto nla_put_failure;
2059 if (ctnetlink_dump_timeout(skb, ct) < 0)
2060 goto nla_put_failure;
2062 if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2063 goto nla_put_failure;
2065 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2066 goto nla_put_failure;
2068 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2069 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2070 goto nla_put_failure;
2071 #endif
2072 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2073 goto nla_put_failure;
2075 if ((ct->status & IPS_SEQ_ADJUST) &&
2076 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
2077 goto nla_put_failure;
2079 #ifdef CONFIG_NF_CONNTRACK_MARK
2080 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2081 goto nla_put_failure;
2082 #endif
2083 if (ctnetlink_dump_labels(skb, ct) < 0)
2084 goto nla_put_failure;
2085 rcu_read_unlock();
2086 return 0;
2088 nla_put_failure:
2089 rcu_read_unlock();
2090 return -ENOSPC;
2093 static int
2094 ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2096 int err;
2098 if (cda[CTA_TIMEOUT]) {
2099 err = ctnetlink_change_timeout(ct, cda);
2100 if (err < 0)
2101 return err;
2103 if (cda[CTA_STATUS]) {
2104 err = ctnetlink_change_status(ct, cda);
2105 if (err < 0)
2106 return err;
2108 if (cda[CTA_HELP]) {
2109 err = ctnetlink_change_helper(ct, cda);
2110 if (err < 0)
2111 return err;
2113 if (cda[CTA_LABELS]) {
2114 err = ctnetlink_attach_labels(ct, cda);
2115 if (err < 0)
2116 return err;
2118 #if defined(CONFIG_NF_CONNTRACK_MARK)
2119 if (cda[CTA_MARK])
2120 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2121 #endif
2122 return 0;
2125 static int
2126 ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
2128 struct nlattr *cda[CTA_MAX+1];
2129 int ret;
2131 ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
2132 if (ret < 0)
2133 return ret;
2135 spin_lock_bh(&nf_conntrack_lock);
2136 ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
2137 spin_unlock_bh(&nf_conntrack_lock);
2139 return ret;
2142 static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
2143 .build_size = ctnetlink_nfqueue_build_size,
2144 .build = ctnetlink_nfqueue_build,
2145 .parse = ctnetlink_nfqueue_parse,
2147 #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
2149 /***********************************************************************
2150 * EXPECT
2151 ***********************************************************************/
2153 static inline int
2154 ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2155 const struct nf_conntrack_tuple *tuple,
2156 enum ctattr_expect type)
2158 struct nlattr *nest_parms;
2160 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
2161 if (!nest_parms)
2162 goto nla_put_failure;
2163 if (ctnetlink_dump_tuples(skb, tuple) < 0)
2164 goto nla_put_failure;
2165 nla_nest_end(skb, nest_parms);
2167 return 0;
2169 nla_put_failure:
2170 return -1;
2173 static inline int
2174 ctnetlink_exp_dump_mask(struct sk_buff *skb,
2175 const struct nf_conntrack_tuple *tuple,
2176 const struct nf_conntrack_tuple_mask *mask)
2178 int ret;
2179 struct nf_conntrack_l3proto *l3proto;
2180 struct nf_conntrack_l4proto *l4proto;
2181 struct nf_conntrack_tuple m;
2182 struct nlattr *nest_parms;
2184 memset(&m, 0xFF, sizeof(m));
2185 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2186 m.src.u.all = mask->src.u.all;
2187 m.dst.protonum = tuple->dst.protonum;
2189 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
2190 if (!nest_parms)
2191 goto nla_put_failure;
2193 rcu_read_lock();
2194 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
2195 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
2196 if (ret >= 0) {
2197 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
2198 tuple->dst.protonum);
2199 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2201 rcu_read_unlock();
2203 if (unlikely(ret < 0))
2204 goto nla_put_failure;
2206 nla_nest_end(skb, nest_parms);
2208 return 0;
2210 nla_put_failure:
2211 return -1;
2214 static const union nf_inet_addr any_addr;
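/* Dump one expectation: tuple, mask, master tuple, optional NAT section,
 * timeout, id, flags, class and the helper/expectfn names if present.
 */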
2216 static int
2217 ctnetlink_exp_dump_expect(struct sk_buff *skb,
2218 const struct nf_conntrack_expect *exp)
2220 struct nf_conn *master = exp->master;
2221 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2222 struct nf_conn_help *help;
2223 #ifdef CONFIG_NF_NAT_NEEDED
2224 struct nlattr *nest_parms;
2225 struct nf_conntrack_tuple nat_tuple = {};
2226 #endif
2227 struct nf_ct_helper_expectfn *expfn;
2229 if (timeout < 0)
2230 timeout = 0;
2232 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2233 goto nla_put_failure;
2234 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2235 goto nla_put_failure;
2236 if (ctnetlink_exp_dump_tuple(skb,
2237 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2238 CTA_EXPECT_MASTER) < 0)
2239 goto nla_put_failure;
2241 #ifdef CONFIG_NF_NAT_NEEDED
2242 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2243 exp->saved_proto.all) {
2244 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2245 if (!nest_parms)
2246 goto nla_put_failure;
2248 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2249 goto nla_put_failure;
2251 nat_tuple.src.l3num = nf_ct_l3num(master);
2252 nat_tuple.src.u3 = exp->saved_addr;
2253 nat_tuple.dst.protonum = nf_ct_protonum(master);
2254 nat_tuple.src.u = exp->saved_proto;
2256 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2257 CTA_EXPECT_NAT_TUPLE) < 0)
2258 goto nla_put_failure;
2259 nla_nest_end(skb, nest_parms);
2261 #endif
2262 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2263 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
2264 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2265 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2266 goto nla_put_failure;
2267 help = nfct_help(master);
2268 if (help) {
2269 struct nf_conntrack_helper *helper;
2271 helper = rcu_dereference(help->helper);
2272 if (helper &&
2273 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2274 goto nla_put_failure;
2276 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2277 if (expfn != NULL &&
2278 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2279 goto nla_put_failure;
2281 return 0;
2283 nla_put_failure:
2284 return -1;
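/* Build a complete expectation message (nfgenmsg header plus the
 * attributes dumped above) into @skb.
 */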
2287 static int
2288 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2289 int event, const struct nf_conntrack_expect *exp)
2291 struct nlmsghdr *nlh;
2292 struct nfgenmsg *nfmsg;
2293 unsigned int flags = portid ? NLM_F_MULTI : 0;
2295 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2296 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2297 if (nlh == NULL)
2298 goto nlmsg_failure;
2300 nfmsg = nlmsg_data(nlh);
2301 nfmsg->nfgen_family = exp->tuple.src.l3num;
2302 nfmsg->version = NFNETLINK_V0;
2303 nfmsg->res_id = 0;
2305 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2306 goto nla_put_failure;
2308 nlmsg_end(skb, nlh);
2309 return skb->len;
2311 nlmsg_failure:
2312 nla_put_failure:
2313 nlmsg_cancel(skb, nlh);
2314 return -1;
2317 #ifdef CONFIG_NF_CONNTRACK_EVENTS
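/* Expectation event notifier: translate IPEXP_NEW and IPEXP_DESTROY
 * events into netlink messages and multicast them to the corresponding
 * NFNLGRP_CONNTRACK_EXP_* group.
 */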
2318 static int
2319 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2321 struct nf_conntrack_expect *exp = item->exp;
2322 struct net *net = nf_ct_exp_net(exp);
2323 struct nlmsghdr *nlh;
2324 struct nfgenmsg *nfmsg;
2325 struct sk_buff *skb;
2326 unsigned int type, group;
2327 int flags = 0;
2329 if (events & (1 << IPEXP_DESTROY)) {
2330 type = IPCTNL_MSG_EXP_DELETE;
2331 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2332 } else if (events & (1 << IPEXP_NEW)) {
2333 type = IPCTNL_MSG_EXP_NEW;
2334 flags = NLM_F_CREATE|NLM_F_EXCL;
2335 group = NFNLGRP_CONNTRACK_EXP_NEW;
2336 } else
2337 return 0;
2339 if (!item->report && !nfnetlink_has_listeners(net, group))
2340 return 0;
2342 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2343 if (skb == NULL)
2344 goto errout;
2346 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2347 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2348 if (nlh == NULL)
2349 goto nlmsg_failure;
2351 nfmsg = nlmsg_data(nlh);
2352 nfmsg->nfgen_family = exp->tuple.src.l3num;
2353 nfmsg->version = NFNETLINK_V0;
2354 nfmsg->res_id = 0;
2356 rcu_read_lock();
2357 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2358 goto nla_put_failure;
2359 rcu_read_unlock();
2361 nlmsg_end(skb, nlh);
2362 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2363 return 0;
2365 nla_put_failure:
2366 rcu_read_unlock();
2367 nlmsg_cancel(skb, nlh);
2368 nlmsg_failure:
2369 kfree_skb(skb);
2370 errout:
2371 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2372 return 0;
2374 #endif
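/* Dump teardown: drop the reference held on the expectation the previous
 * dump round stopped at, if any.
 */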
2375 static int ctnetlink_exp_done(struct netlink_callback *cb)
2377 if (cb->args[1])
2378 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2379 return 0;
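/* Dump the whole expectation table.  cb->args[0] is the current hash
 * bucket and cb->args[1] holds a referenced pointer to the entry the
 * previous round stopped at, so the dump can resume from there.
 */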
2382 static int
2383 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2385 struct net *net = sock_net(skb->sk);
2386 struct nf_conntrack_expect *exp, *last;
2387 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2388 u_int8_t l3proto = nfmsg->nfgen_family;
2390 rcu_read_lock();
2391 last = (struct nf_conntrack_expect *)cb->args[1];
2392 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2393 restart:
2394 hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
2395 hnode) {
2396 if (l3proto && exp->tuple.src.l3num != l3proto)
2397 continue;
2398 if (cb->args[1]) {
2399 if (exp != last)
2400 continue;
2401 cb->args[1] = 0;
2403 if (ctnetlink_exp_fill_info(skb,
2404 NETLINK_CB(cb->skb).portid,
2405 cb->nlh->nlmsg_seq,
2406 IPCTNL_MSG_EXP_NEW,
2407 exp) < 0) {
2408 if (!atomic_inc_not_zero(&exp->use))
2409 continue;
2410 cb->args[1] = (unsigned long)exp;
2411 goto out;
2414 if (cb->args[1]) {
2415 cb->args[1] = 0;
2416 goto restart;
2419 out:
2420 rcu_read_unlock();
2421 if (last)
2422 nf_ct_expect_put(last);
2424 return skb->len;
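/* Dump only the expectations attached to one master conntrack (passed in
 * cb->data), using the same resume scheme as the full table dump above.
 */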
2427 static int
2428 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2430 struct nf_conntrack_expect *exp, *last;
2431 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2432 struct nf_conn *ct = cb->data;
2433 struct nf_conn_help *help = nfct_help(ct);
2434 u_int8_t l3proto = nfmsg->nfgen_family;
2436 if (cb->args[0])
2437 return 0;
2439 rcu_read_lock();
2440 last = (struct nf_conntrack_expect *)cb->args[1];
2441 restart:
2442 hlist_for_each_entry(exp, &help->expectations, lnode) {
2443 if (l3proto && exp->tuple.src.l3num != l3proto)
2444 continue;
2445 if (cb->args[1]) {
2446 if (exp != last)
2447 continue;
2448 cb->args[1] = 0;
2450 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2451 cb->nlh->nlmsg_seq,
2452 IPCTNL_MSG_EXP_NEW,
2453 exp) < 0) {
2454 if (!atomic_inc_not_zero(&exp->use))
2455 continue;
2456 cb->args[1] = (unsigned long)exp;
2457 goto out;
2460 if (cb->args[1]) {
2461 cb->args[1] = 0;
2462 goto restart;
2464 cb->args[0] = 1;
2465 out:
2466 rcu_read_unlock();
2467 if (last)
2468 nf_ct_expect_put(last);
2470 return skb->len;
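/* GET with NLM_F_DUMP and CTA_EXPECT_MASTER: look up the master conntrack
 * and start a dump of its expectations only.
 */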
2473 static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
2474 const struct nlmsghdr *nlh,
2475 const struct nlattr * const cda[])
2477 int err;
2478 struct net *net = sock_net(ctnl);
2479 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2480 u_int8_t u3 = nfmsg->nfgen_family;
2481 struct nf_conntrack_tuple tuple;
2482 struct nf_conntrack_tuple_hash *h;
2483 struct nf_conn *ct;
2484 u16 zone = 0;
2485 struct netlink_dump_control c = {
2486 .dump = ctnetlink_exp_ct_dump_table,
2487 .done = ctnetlink_exp_done,
2490 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2491 if (err < 0)
2492 return err;
2494 if (cda[CTA_EXPECT_ZONE]) {
2495 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2496 if (err < 0)
2497 return err;
2500 h = nf_conntrack_find_get(net, zone, &tuple);
2501 if (!h)
2502 return -ENOENT;
2504 ct = nf_ct_tuplehash_to_ctrack(h);
2505 c.data = ct;
2507 err = netlink_dump_start(ctnl, skb, nlh, &c);
2508 nf_ct_put(ct);
2510 return err;
2513 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2514 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2515 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2516 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2517 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2518 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2519 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2520 .len = NF_CT_HELPER_NAME_LEN - 1 },
2521 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2522 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2523 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2524 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2525 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
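/* IPCTNL_MSG_EXP_GET: either start a dump (NLM_F_DUMP) or look up a
 * single expectation by its tuple (or, failing that, the CTA_EXPECT_MASTER
 * tuple) and unicast it back to the requesting socket.
 */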
2528 static int
2529 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2530 const struct nlmsghdr *nlh,
2531 const struct nlattr * const cda[])
2533 struct net *net = sock_net(ctnl);
2534 struct nf_conntrack_tuple tuple;
2535 struct nf_conntrack_expect *exp;
2536 struct sk_buff *skb2;
2537 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2538 u_int8_t u3 = nfmsg->nfgen_family;
2539 u16 zone;
2540 int err;
2542 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2543 if (cda[CTA_EXPECT_MASTER])
2544 return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
2545 else {
2546 struct netlink_dump_control c = {
2547 .dump = ctnetlink_exp_dump_table,
2548 .done = ctnetlink_exp_done,
2550 return netlink_dump_start(ctnl, skb, nlh, &c);
2554 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2555 if (err < 0)
2556 return err;
2558 if (cda[CTA_EXPECT_TUPLE])
2559 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2560 else if (cda[CTA_EXPECT_MASTER])
2561 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2562 else
2563 return -EINVAL;
2565 if (err < 0)
2566 return err;
2568 exp = nf_ct_expect_find_get(net, zone, &tuple);
2569 if (!exp)
2570 return -ENOENT;
2572 if (cda[CTA_EXPECT_ID]) {
2573 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2574 if (ntohl(id) != (u32)(unsigned long)exp) {
2575 nf_ct_expect_put(exp);
2576 return -ENOENT;
2580 err = -ENOMEM;
2581 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2582 if (skb2 == NULL) {
2583 nf_ct_expect_put(exp);
2584 goto out;
2587 rcu_read_lock();
2588 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
2589 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2590 rcu_read_unlock();
2591 nf_ct_expect_put(exp);
2592 if (err <= 0)
2593 goto free;
2595 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2596 if (err < 0)
2597 goto out;
2599 return 0;
2601 free:
2602 kfree_skb(skb2);
2603 out:
2604 /* this avoids a loop in nfnetlink. */
2605 return err == -EAGAIN ? -ENOBUFS : err;
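/* IPCTNL_MSG_EXP_DELETE: remove a single expectation by tuple (and
 * optional id), all expectations belonging to a given helper, or, if
 * neither is given, flush the whole expectation table.
 */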
2608 static int
2609 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2610 const struct nlmsghdr *nlh,
2611 const struct nlattr * const cda[])
2613 struct net *net = sock_net(ctnl);
2614 struct nf_conntrack_expect *exp;
2615 struct nf_conntrack_tuple tuple;
2616 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2617 struct hlist_node *next;
2618 u_int8_t u3 = nfmsg->nfgen_family;
2619 unsigned int i;
2620 u16 zone;
2621 int err;
2623 if (cda[CTA_EXPECT_TUPLE]) {
2624 /* delete a single expect by tuple */
2625 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2626 if (err < 0)
2627 return err;
2629 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2630 if (err < 0)
2631 return err;
2633 /* bump usage count to 2 */
2634 exp = nf_ct_expect_find_get(net, zone, &tuple);
2635 if (!exp)
2636 return -ENOENT;
2638 if (cda[CTA_EXPECT_ID]) {
2639 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2640 if (ntohl(id) != (u32)(unsigned long)exp) {
2641 nf_ct_expect_put(exp);
2642 return -ENOENT;
2646 /* after list removal, usage count == 1 */
2647 spin_lock_bh(&nf_conntrack_lock);
2648 if (del_timer(&exp->timeout)) {
2649 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
2650 nlmsg_report(nlh));
2651 nf_ct_expect_put(exp);
2653 spin_unlock_bh(&nf_conntrack_lock);
2654 /* have to put the reference taken by the 'get' above;
2655 * after this line the usage count is 0 */
2656 nf_ct_expect_put(exp);
2657 } else if (cda[CTA_EXPECT_HELP_NAME]) {
2658 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2659 struct nf_conn_help *m_help;
2661 /* delete all expectations for this helper */
2662 spin_lock_bh(&nf_conntrack_lock);
2663 for (i = 0; i < nf_ct_expect_hsize; i++) {
2664 hlist_for_each_entry_safe(exp, next,
2665 &net->ct.expect_hash[i],
2666 hnode) {
2667 m_help = nfct_help(exp->master);
2668 if (!strcmp(m_help->helper->name, name) &&
2669 del_timer(&exp->timeout)) {
2670 nf_ct_unlink_expect_report(exp,
2671 NETLINK_CB(skb).portid,
2672 nlmsg_report(nlh));
2673 nf_ct_expect_put(exp);
2677 spin_unlock_bh(&nf_conntrack_lock);
2678 } else {
2679 /* no tuple and no helper name given: flush all expectations */
2680 spin_lock_bh(&nf_conntrack_lock);
2681 for (i = 0; i < nf_ct_expect_hsize; i++) {
2682 hlist_for_each_entry_safe(exp, next,
2683 &net->ct.expect_hash[i],
2684 hnode) {
2685 if (del_timer(&exp->timeout)) {
2686 nf_ct_unlink_expect_report(exp,
2687 NETLINK_CB(skb).portid,
2688 nlmsg_report(nlh));
2689 nf_ct_expect_put(exp);
2693 spin_unlock_bh(&nf_conntrack_lock);
2696 return 0;
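/* Update an existing expectation; only the timeout can be changed. */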
2698 static int
2699 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2700 const struct nlattr * const cda[])
2702 if (cda[CTA_EXPECT_TIMEOUT]) {
2703 if (!del_timer(&x->timeout))
2704 return -ETIME;
2706 x->timeout.expires = jiffies +
2707 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2708 add_timer(&x->timeout);
2710 return 0;
2713 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2714 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2715 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
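/* Parse the nested CTA_EXPECT_NAT attribute into the expectation's saved
 * NAT address/proto and direction; only available with NAT support.
 */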
2718 static int
2719 ctnetlink_parse_expect_nat(const struct nlattr *attr,
2720 struct nf_conntrack_expect *exp,
2721 u_int8_t u3)
2723 #ifdef CONFIG_NF_NAT_NEEDED
2724 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2725 struct nf_conntrack_tuple nat_tuple = {};
2726 int err;
2728 err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2729 if (err < 0)
2730 return err;
2732 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2733 return -EINVAL;
2735 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2736 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2737 if (err < 0)
2738 return err;
2740 exp->saved_addr = nat_tuple.src.u3;
2741 exp->saved_proto = nat_tuple.src.u;
2742 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2744 return 0;
2745 #else
2746 return -EOPNOTSUPP;
2747 #endif
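/* Create a new expectation from netlink attributes: resolve the master
 * conntrack and the optional helper (auto-loading its module if needed),
 * fill in tuple, mask, class, flags and NAT information, then register
 * the expectation against the master.
 */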
2750 static int
2751 ctnetlink_create_expect(struct net *net, u16 zone,
2752 const struct nlattr * const cda[],
2753 u_int8_t u3,
2754 u32 portid, int report)
2756 struct nf_conntrack_tuple tuple, mask, master_tuple;
2757 struct nf_conntrack_tuple_hash *h = NULL;
2758 struct nf_conntrack_expect *exp;
2759 struct nf_conn *ct;
2760 struct nf_conn_help *help;
2761 struct nf_conntrack_helper *helper = NULL;
2762 u_int32_t class = 0;
2763 int err = 0;
2765 /* the caller guarantees that these three CTA_EXPECT_* attributes are present */
2766 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2767 if (err < 0)
2768 return err;
2769 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2770 if (err < 0)
2771 return err;
2772 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2773 if (err < 0)
2774 return err;
2776 /* Look for master conntrack of this expectation */
2777 h = nf_conntrack_find_get(net, zone, &master_tuple);
2778 if (!h)
2779 return -ENOENT;
2780 ct = nf_ct_tuplehash_to_ctrack(h);
2782 /* Look for helper of this expectation */
2783 if (cda[CTA_EXPECT_HELP_NAME]) {
2784 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2786 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2787 nf_ct_protonum(ct));
2788 if (helper == NULL) {
2789 #ifdef CONFIG_MODULES
2790 if (request_module("nfct-helper-%s", helpname) < 0) {
2791 err = -EOPNOTSUPP;
2792 goto out;
2795 helper = __nf_conntrack_helper_find(helpname,
2796 nf_ct_l3num(ct),
2797 nf_ct_protonum(ct));
2798 if (helper) {
2799 err = -EAGAIN;
2800 goto out;
2802 #endif
2803 err = -EOPNOTSUPP;
2804 goto out;
2808 if (cda[CTA_EXPECT_CLASS] && helper) {
2809 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2810 if (class > helper->expect_class_max) {
2811 err = -EINVAL;
2812 goto out;
2815 exp = nf_ct_expect_alloc(ct);
2816 if (!exp) {
2817 err = -ENOMEM;
2818 goto out;
2820 help = nfct_help(ct);
2821 if (!help) {
2822 if (!cda[CTA_EXPECT_TIMEOUT]) {
2823 err = -EINVAL;
2824 goto err_out;
2826 exp->timeout.expires =
2827 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2829 exp->flags = NF_CT_EXPECT_USERSPACE;
2830 if (cda[CTA_EXPECT_FLAGS]) {
2831 exp->flags |=
2832 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2834 } else {
2835 if (cda[CTA_EXPECT_FLAGS]) {
2836 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2837 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2838 } else
2839 exp->flags = 0;
2841 if (cda[CTA_EXPECT_FN]) {
2842 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2843 struct nf_ct_helper_expectfn *expfn;
2845 expfn = nf_ct_helper_expectfn_find_by_name(name);
2846 if (expfn == NULL) {
2847 err = -EINVAL;
2848 goto err_out;
2850 exp->expectfn = expfn->expectfn;
2851 } else
2852 exp->expectfn = NULL;
2854 exp->class = class;
2855 exp->master = ct;
2856 exp->helper = helper;
2857 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2858 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2859 exp->mask.src.u.all = mask.src.u.all;
2861 if (cda[CTA_EXPECT_NAT]) {
2862 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2863 exp, u3);
2864 if (err < 0)
2865 goto err_out;
2867 err = nf_ct_expect_related_report(exp, portid, report);
2868 err_out:
2869 nf_ct_expect_put(exp);
2870 out:
2871 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2872 return err;
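/* IPCTNL_MSG_EXP_NEW: create the expectation if it does not exist yet
 * (NLM_F_CREATE) or update the existing one, honouring NLM_F_EXCL.
 */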
2875 static int
2876 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2877 const struct nlmsghdr *nlh,
2878 const struct nlattr * const cda[])
2880 struct net *net = sock_net(ctnl);
2881 struct nf_conntrack_tuple tuple;
2882 struct nf_conntrack_expect *exp;
2883 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2884 u_int8_t u3 = nfmsg->nfgen_family;
2885 u16 zone;
2886 int err;
2888 if (!cda[CTA_EXPECT_TUPLE]
2889 || !cda[CTA_EXPECT_MASK]
2890 || !cda[CTA_EXPECT_MASTER])
2891 return -EINVAL;
2893 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2894 if (err < 0)
2895 return err;
2897 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2898 if (err < 0)
2899 return err;
2901 spin_lock_bh(&nf_conntrack_lock);
2902 exp = __nf_ct_expect_find(net, zone, &tuple);
2904 if (!exp) {
2905 spin_unlock_bh(&nf_conntrack_lock);
2906 err = -ENOENT;
2907 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2908 err = ctnetlink_create_expect(net, zone, cda,
2910 NETLINK_CB(skb).portid,
2911 nlmsg_report(nlh));
2913 return err;
2916 err = -EEXIST;
2917 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
2918 err = ctnetlink_change_expect(exp, cda);
2919 spin_unlock_bh(&nf_conntrack_lock);
2921 return err;
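/* Fill one per-CPU expectation statistics message (new/create/delete
 * counters); the CPU number is carried in the nfgenmsg res_id field.
 */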
2924 static int
2925 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
2926 const struct ip_conntrack_stat *st)
2928 struct nlmsghdr *nlh;
2929 struct nfgenmsg *nfmsg;
2930 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2932 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
2933 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2934 if (nlh == NULL)
2935 goto nlmsg_failure;
2937 nfmsg = nlmsg_data(nlh);
2938 nfmsg->nfgen_family = AF_UNSPEC;
2939 nfmsg->version = NFNETLINK_V0;
2940 nfmsg->res_id = htons(cpu);
2942 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
2943 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
2944 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
2945 goto nla_put_failure;
2947 nlmsg_end(skb, nlh);
2948 return skb->len;
2950 nla_put_failure:
2951 nlmsg_failure:
2952 nlmsg_cancel(skb, nlh);
2953 return -1;
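/* Walk all possible CPUs and emit one statistics message per CPU;
 * cb->args[0] remembers where to resume.
 */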
2956 static int
2957 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2959 int cpu;
2960 struct net *net = sock_net(skb->sk);
2962 if (cb->args[0] == nr_cpu_ids)
2963 return 0;
2965 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2966 const struct ip_conntrack_stat *st;
2968 if (!cpu_possible(cpu))
2969 continue;
2971 st = per_cpu_ptr(net->ct.stat, cpu);
2972 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
2973 cb->nlh->nlmsg_seq,
2974 cpu, st) < 0)
2975 break;
2977 cb->args[0] = cpu;
2979 return skb->len;
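/* IPCTNL_MSG_EXP_GET_STATS_CPU: statistics are only available as a dump. */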
2982 static int
2983 ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
2984 const struct nlmsghdr *nlh,
2985 const struct nlattr * const cda[])
2987 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2988 struct netlink_dump_control c = {
2989 .dump = ctnetlink_exp_stat_cpu_dump,
2991 return netlink_dump_start(ctnl, skb, nlh, &c);
2994 return 0;
2997 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2998 static struct nf_ct_event_notifier ctnl_notifier = {
2999 .fcn = ctnetlink_conntrack_event,
3002 static struct nf_exp_event_notifier ctnl_notifier_exp = {
3003 .fcn = ctnetlink_expect_event,
3005 #endif
3007 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3008 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
3009 .attr_count = CTA_MAX,
3010 .policy = ct_nla_policy },
3011 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
3012 .attr_count = CTA_MAX,
3013 .policy = ct_nla_policy },
3014 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
3015 .attr_count = CTA_MAX,
3016 .policy = ct_nla_policy },
3017 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
3018 .attr_count = CTA_MAX,
3019 .policy = ct_nla_policy },
3020 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
3021 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
3022 [IPCTNL_MSG_CT_GET_DYING] = { .call = ctnetlink_get_ct_dying },
3023 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed },
3026 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3027 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
3028 .attr_count = CTA_EXPECT_MAX,
3029 .policy = exp_nla_policy },
3030 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
3031 .attr_count = CTA_EXPECT_MAX,
3032 .policy = exp_nla_policy },
3033 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
3034 .attr_count = CTA_EXPECT_MAX,
3035 .policy = exp_nla_policy },
3036 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
3039 static const struct nfnetlink_subsystem ctnl_subsys = {
3040 .name = "conntrack",
3041 .subsys_id = NFNL_SUBSYS_CTNETLINK,
3042 .cb_count = IPCTNL_MSG_MAX,
3043 .cb = ctnl_cb,
3046 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3047 .name = "conntrack_expect",
3048 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
3049 .cb_count = IPCTNL_MSG_EXP_MAX,
3050 .cb = ctnl_exp_cb,
3053 MODULE_ALIAS("ip_conntrack_netlink");
3054 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3055 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
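/* Per-netns setup: register the conntrack and expectation event notifiers
 * when event support is compiled in.
 */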
3057 static int __net_init ctnetlink_net_init(struct net *net)
3059 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3060 int ret;
3062 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3063 if (ret < 0) {
3064 pr_err("ctnetlink_init: cannot register notifier.\n");
3065 goto err_out;
3068 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3069 if (ret < 0) {
3070 pr_err("ctnetlink_init: cannot register expect notifier.\n");
3071 goto err_unreg_notifier;
3073 #endif
3074 return 0;
3076 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3077 err_unreg_notifier:
3078 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3079 err_out:
3080 return ret;
3081 #endif
3084 static void ctnetlink_net_exit(struct net *net)
3086 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3087 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3088 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3089 #endif
3092 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3094 struct net *net;
3096 list_for_each_entry(net, net_exit_list, exit_list)
3097 ctnetlink_net_exit(net);
3100 static struct pernet_operations ctnetlink_net_ops = {
3101 .init = ctnetlink_net_init,
3102 .exit_batch = ctnetlink_net_exit_batch,
3105 static int __init ctnetlink_init(void)
3107 int ret;
3109 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
3110 ret = nfnetlink_subsys_register(&ctnl_subsys);
3111 if (ret < 0) {
3112 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3113 goto err_out;
3116 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3117 if (ret < 0) {
3118 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3119 goto err_unreg_subsys;
3122 ret = register_pernet_subsys(&ctnetlink_net_ops);
3123 if (ret < 0) {
3124 pr_err("ctnetlink_init: cannot register pernet operations.\n");
3125 goto err_unreg_exp_subsys;
3127 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3128 /* set up interaction between nf_queue and nf_conntrack_netlink. */
3129 RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
3130 #endif
3131 return 0;
3133 err_unreg_exp_subsys:
3134 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3135 err_unreg_subsys:
3136 nfnetlink_subsys_unregister(&ctnl_subsys);
3137 err_out:
3138 return ret;
3141 static void __exit ctnetlink_exit(void)
3143 pr_info("ctnetlink: unregistering from nfnetlink.\n");
3145 unregister_pernet_subsys(&ctnetlink_net_ops);
3146 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3147 nfnetlink_subsys_unregister(&ctnl_subsys);
3148 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3149 RCU_INIT_POINTER(nfq_ct_hook, NULL);
3150 #endif
3153 module_init(ctnetlink_init);
3154 module_exit(ctnetlink_exit);