net/netfilter/nf_conntrack_netlink.c
/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

MODULE_LICENSE("GPL");

static char __initdata version[] = "0.93";

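/* Serialize the layer 4 part of a tuple (protocol number plus any
 * per-protocol attributes) as a nested CTA_TUPLE_PROTO attribute.
 */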
static inline int
ctnetlink_dump_tuples_proto(struct sk_buff *skb,
			    const struct nf_conntrack_tuple *tuple,
			    struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
		goto nla_put_failure;

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_tuples_ip(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 struct nf_conntrack_l3proto *l3proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	if (likely(l3proto->tuple_to_nlattr))
		ret = l3proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

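/* Dump a complete tuple: IP part and protocol part, looking up the
 * l3/l4 protocol handlers under RCU.
 */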
static int
ctnetlink_dump_tuples(struct sk_buff *skb,
		      const struct nf_conntrack_tuple *tuple)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);

	if (ret >= 0) {
		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
					       tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
	}
	rcu_read_unlock();
	return ret;
}

static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
	long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
	if (!nest_helper)
		goto nla_put_failure;
	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
		goto nla_put_failure;

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	return 0;

nla_put_failure:
	return -1;
}

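/* Emit packet/byte counters for one direction as a nested
 * CTA_COUNTERS_ORIG or CTA_COUNTERS_REPLY attribute.
 */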
static int
dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
	      enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
			enum ip_conntrack_dir dir, int type)
{
	struct nf_conn_counter *acct;
	u64 pkts, bytes;

	acct = nf_conn_acct_find(ct);
	if (!acct)
		return 0;

	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
		pkts = atomic64_xchg(&acct[dir].packets, 0);
		bytes = atomic64_xchg(&acct[dir].bytes, 0);
	} else {
		pkts = atomic64_read(&acct[dir].packets);
		bytes = atomic64_read(&acct[dir].bytes);
	}
	return dump_counters(skb, pkts, bytes, dir);
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
					       cpu_to_be64(tstamp->stop))))
		goto nla_put_failure;
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static inline int
ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_LABELS
static int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(labels->words * sizeof(long));
}

static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int len, i;

	if (!labels)
		return 0;

	len = labels->words * sizeof(long);
	i = 0;
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, len, labels->bits);
		i++;
	} while (i < labels->words);

	return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a)	(0)
#endif

#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

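/* For expected (related) connections, dump the original tuple of the
 * master connection.
 */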
static inline int
ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_NAT_NEEDED
static int
dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
			 htonl(natseq->correction_pos)) ||
	    nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
			 htonl(natseq->offset_before)) ||
	    nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
			 htonl(natseq->offset_after)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_nat_seq *natseq;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
		return 0;

	natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
		return -1;

	natseq = &nat->seq[IP_CT_DIR_REPLY];
	if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
		return -1;

	return 0;
}
#else
#define ctnetlink_dump_nat_seq_adj(a, b) (0)
#endif

static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

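/* Fill one complete conntrack entry into a netlink message; used for
 * table dumps and single GET replies.
 */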
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
		    struct nf_conn *ct)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	unsigned int flags = portid ? NLM_F_MULTI : 0, event;

	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct) &&
	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_labels(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0 ||
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static inline size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	size_t len = 0;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	len += l3proto->nla_size;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	len += l4proto->nla_size;
	rcu_read_unlock();

	return len;
}

static inline size_t
ctnetlink_counters_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}

static inline int
ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}

static inline size_t
ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
#else
	return 0;
#endif
}

static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_counters_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       + ctnetlink_label_size(ct)
	       ;
}

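/* Event path: translate conntrack events into netlink messages and
 * broadcast them to the NFNLGRP_CONNTRACK_* multicast groups.
 */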
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (nf_ct_is_untracked(ct))
		return 0;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct) &&
	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_counters(skb, ct,
					    IP_CT_DIR_ORIGINAL, type) < 0 ||
		    ctnetlink_dump_counters(skb, ct,
					    IP_CT_DIR_REPLY, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_NATSEQADJ) &&
		    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

static int ctnetlink_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_put((struct nf_conn *)cb->args[1]);
	if (cb->data)
		kfree(cb->data);
	return 0;
}

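/* Optional mark/mask filter carried in cb->data while a table dump is
 * in progress.
 */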
struct ctnetlink_dump_filter {
	struct {
		u_int32_t val;
		u_int32_t mask;
	} mark;
};

static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;
	int res;
#ifdef CONFIG_NF_CONNTRACK_MARK
	const struct ctnetlink_dump_filter *filter = cb->data;
#endif

	spin_lock_bh(&nf_conntrack_lock);
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
					   hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				continue;
			if (cb->args[1]) {
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
#ifdef CONFIG_NF_CONNTRACK_MARK
			if (filter && !((ct->mark & filter->mark.mask) ==
					filter->mark.val)) {
				continue;
			}
#endif
			rcu_read_lock();
			res =
			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					    ct);
			rcu_read_unlock();
			if (res < 0) {
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				goto out;
			}
		}
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	spin_unlock_bh(&nf_conntrack_lock);
	if (last)
		nf_ct_put(last);

	return skb->len;
}

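/* Parsing side: convert CTA_TUPLE_* attributes from userspace back
 * into a struct nf_conntrack_tuple.
 */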
static inline int
ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	struct nf_conntrack_l3proto *l3proto;
	int ret = 0;

	nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);

	if (likely(l3proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_IP_MAX,
					  l3proto->nla_policy);
		if (ret == 0)
			ret = l3proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}

static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};

static inline int
ctnetlink_parse_tuple_proto(struct nlattr *attr,
			    struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_PROTO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
	if (ret < 0)
		return ret;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;
	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_PROTO_MAX,
					  l4proto->nla_policy);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}

static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
};

static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple,
		      enum ctattr_type type, u_int8_t l3num)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);

	if (!tb[CTA_TUPLE_IP])
		return -EINVAL;

	tuple->src.l3num = l3num;

	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
	if (err < 0)
		return err;

	if (!tb[CTA_TUPLE_PROTO])
		return -EINVAL;

	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
	if (err < 0)
		return err;

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}

static int
ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
{
	if (attr)
#ifdef CONFIG_NF_CONNTRACK_ZONES
		*zone = ntohs(nla_get_be16(attr));
#else
		return -EOPNOTSUPP;
#endif
	else
		*zone = 0;

	return 0;
}

static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
				    .len = NF_CT_HELPER_NAME_LEN - 1 },
};

static inline int
ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
		     struct nlattr **helpinfo)
{
	struct nlattr *tb[CTA_HELP_MAX+1];

	nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	if (tb[CTA_HELP_INFO])
		*helpinfo = tb[CTA_HELP_INFO];

	return 0;
}

#define __CTA_LABELS_MAX_LENGTH ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS]		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT]		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_ORIG]  = { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
	[CTA_MARK_MASK]		= { .type = NLA_U32 },
	[CTA_LABELS]		= { .type = NLA_BINARY,
				    .len = __CTA_LABELS_MAX_LENGTH },
	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
				    .len = __CTA_LABELS_MAX_LENGTH },
};

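/* IPCTNL_MSG_CT_DELETE handler: remove the conntrack matching the
 * given tuple, or flush the whole table if no tuple is supplied.
 */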
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(net,
					  NETLINK_CB(skb).portid,
					  nlmsg_report(nlh));
		return 0;
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	if (del_timer(&ct->timeout)) {
		if (nf_conntrack_event_report(IPCT_DESTROY, ct,
					      NETLINK_CB(skb).portid,
					      nlmsg_report(nlh)) < 0) {
			nf_ct_delete_from_lists(ct);
			/* we failed to report the event, try later */
			nf_ct_dying_timeout(ct);
			nf_ct_put(ct);
			return 0;
		}
		/* death_by_timeout would report the event again */
		set_bit(IPS_DYING_BIT, &ct->status);
		nf_ct_delete_from_lists(ct);
		nf_ct_put(ct);
	}
	nf_ct_put(ct);

	return 0;
}

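/* IPCTNL_MSG_CT_GET handler: start a (possibly mark-filtered) table
 * dump, or look up and return a single entry.
 */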
static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
		};
#ifdef CONFIG_NF_CONNTRACK_MARK
		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
			struct ctnetlink_dump_filter *filter;

			filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
					 GFP_ATOMIC);
			if (filter == NULL)
				return -ENOMEM;

			filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
			filter->mark.mask =
				ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
			c.data = filter;
		}
#endif
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	rcu_read_lock();
	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
	rcu_read_unlock();
	nf_ct_put(ct);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}

static int ctnetlink_done_list(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_put((struct nf_conn *)cb->args[1]);
	return 0;
}

static int
ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb,
		    struct hlist_nulls_head *list)
{
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;
	int res;

	if (cb->args[2])
		return 0;

	spin_lock_bh(&nf_conntrack_lock);
	last = (struct nf_conn *)cb->args[1];
restart:
	hlist_nulls_for_each_entry(h, n, list, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (l3proto && nf_ct_l3num(ct) != l3proto)
			continue;
		if (cb->args[1]) {
			if (ct != last)
				continue;
			cb->args[1] = 0;
		}
		rcu_read_lock();
		res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq,
					  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					  ct);
		rcu_read_unlock();
		if (res < 0) {
			nf_conntrack_get(&ct->ct_general);
			cb->args[1] = (unsigned long)ct;
			goto out;
		}
	}
	if (cb->args[1]) {
		cb->args[1] = 0;
		goto restart;
	} else
		cb->args[2] = 1;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	if (last)
		nf_ct_put(last);

	return skb->len;
}

static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);

	return ctnetlink_dump_list(skb, cb, &net->ct.dying);
}

static int
ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
		       const struct nlmsghdr *nlh,
		       const struct nlattr * const cda[])
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_dying,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	return -EOPNOTSUPP;
}

static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);

	return ctnetlink_dump_list(skb, cb, &net->ct.unconfirmed);
}

static int
ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
			     const struct nlmsghdr *nlh,
			     const struct nlattr * const cda[])
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_unconfirmed,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	return -EOPNOTSUPP;
}

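/* Delegate NAT setup to the NAT module through
 * nfnetlink_parse_nat_setup_hook, autoloading it via request_module()
 * when necessary.
 */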
#ifdef CONFIG_NF_NAT_NEEDED
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
	int err;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock();
		if (request_module("nf-nat") < 0) {
			nfnl_lock();
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		rcu_read_lock();
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock();
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock();
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		rcu_read_lock();
#else
		err = -EOPNOTSUPP;
#endif
	}
	return err;
}
#endif

static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	unsigned long d;
	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
	d = ct->status ^ status;

	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
		/* unchangeable */
		return -EBUSY;

	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
		/* SEEN_REPLY bit can only be set */
		return -EBUSY;

	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
		/* ASSURED bit can only be set */
		return -EBUSY;

	/* Be careful here, modifying NAT bits can screw up things,
	 * so don't let users modify them directly if they don't pass
	 * nf_nat_range. */
	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
	return 0;
}

static int
ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_NAT_NEEDED
	int ret;

	if (cda[CTA_NAT_DST]) {
		ret = ctnetlink_parse_nat_setup(ct,
						NF_NAT_MANIP_DST,
						cda[CTA_NAT_DST]);
		if (ret < 0)
			return ret;
	}
	if (cda[CTA_NAT_SRC]) {
		ret = ctnetlink_parse_nat_setup(ct,
						NF_NAT_MANIP_SRC,
						cda[CTA_NAT_SRC]);
		if (ret < 0)
			return ret;
	}
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static inline int
ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	struct nlattr *helpinfo = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		spin_unlock_bh(&nf_conntrack_lock);

		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper) {
			/* update private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);
			return 0;
		} else
			return -EBUSY;
	}

	/* we cannot set a helper for an existing conntrack */
	return -EOPNOTSUPP;
}

static inline int
ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
{
	u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	if (!del_timer(&ct->timeout))
		return -ETIME;

	ct->timeout.expires = jiffies + timeout * HZ;
	add_timer(&ct->timeout);

	return 0;
}

static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};

static inline int
ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
{
	const struct nlattr *attr = cda[CTA_PROTOINFO];
	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int err = 0;

	nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);
	rcu_read_unlock();

	return err;
}

#ifdef CONFIG_NF_NAT_NEEDED
static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
	[CTA_NAT_SEQ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_AFTER]	= { .type = NLA_U32 },
};

static inline int
change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
{
	struct nlattr *cda[CTA_NAT_SEQ_MAX+1];

	nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);

	if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
		return -EINVAL;

	natseq->correction_pos =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));

	if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
		return -EINVAL;

	natseq->offset_before =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));

	if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
		return -EINVAL;

	natseq->offset_after =
		ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));

	return 0;
}

static int
ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
			     const struct nlattr * const cda[])
{
	int ret = 0;
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (!nat)
		return 0;

	if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
		ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
					 cda[CTA_NAT_SEQ_ADJ_ORIG]);
		if (ret < 0)
			return ret;

		ct->status |= IPS_SEQ_ADJUST;
	}

	if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
					 cda[CTA_NAT_SEQ_ADJ_REPLY]);
		if (ret < 0)
			return ret;

		ct->status |= IPS_SEQ_ADJUST;
	}

	return 0;
}
#endif

static int
ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	size_t len = nla_len(cda[CTA_LABELS]);
	const void *mask = cda[CTA_LABELS_MASK];

	if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
		return -EINVAL;

	if (mask) {
		if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
		    nla_len(cda[CTA_LABELS_MASK]) != len)
			return -EINVAL;
		mask = nla_data(cda[CTA_LABELS_MASK]);
	}

	len /= sizeof(u32);

	return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
#else
	return -EOPNOTSUPP;
#endif
}

static int
ctnetlink_change_conntrack(struct nf_conn *ct,
			   const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}
#endif
	if (cda[CTA_LABELS]) {
		err = ctnetlink_attach_labels(ct, cda);
		if (err < 0)
			return err;
	}

	return 0;
}

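/* Create a new conntrack entry from netlink attributes; on success the
 * entry has already been inserted into the hash table.
 */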
static struct nf_conn *
ctnetlink_create_conntrack(struct net *net, u16 zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;
	struct nf_conn_tstamp *tstamp;

	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

	if (!cda[CTA_TIMEOUT])
		goto err1;
	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;

	rcu_read_lock();
	if (cda[CTA_HELP]) {
		char *helpname = NULL;
		struct nlattr *helpinfo = NULL;

		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL) {
			rcu_read_unlock();
#ifdef CONFIG_MODULES
			if (request_module("nfct-helper-%s", helpname) < 0) {
				err = -EOPNOTSUPP;
				goto err1;
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find(helpname,
							    nf_ct_l3num(ct),
							    nf_ct_protonum(ct));
			if (helper) {
				err = -EAGAIN;
				goto err2;
			}
			rcu_read_unlock();
#endif
			err = -EOPNOTSUPP;
			goto err1;
		} else {
			struct nf_conn_help *help;

			help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
			if (help == NULL) {
				err = -ENOMEM;
				goto err2;
			}
			/* set private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);

			/* not in hash table yet so not strictly necessary */
			RCU_INIT_POINTER(help->helper, helper);
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
		err = ctnetlink_change_nat(ct, cda);
		if (err < 0)
			goto err2;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
	nf_ct_labels_ext_add(ct);

	/* we must add conntrack extensions before confirmation. */
	ct->status |= IPS_CONFIRMED;

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err2;
	}

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			goto err2;
	}
#endif

	memset(&ct->proto, 0, sizeof(ct->proto));
	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err2;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	/* setup master conntrack: this is a confirmed expectation */
	if (cda[CTA_TUPLE_MASTER]) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h;
		struct nf_conn *master_ct;

		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(net, zone, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
		}
		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_to_ns(ktime_get_real());

	err = nf_conntrack_hash_check_insert(ct);
	if (err < 0)
		goto err2;

	rcu_read_unlock();

	return ct;

err2:
	rcu_read_unlock();
err1:
	nf_conntrack_free(ct);
	return ERR_PTR(err);
}

static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_conn *ct;
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_ORIG])
		h = nf_conntrack_find_get(net, zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = nf_conntrack_find_get(net, zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			enum ip_conntrack_events events;

			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct))
				return PTR_ERR(ct);

			err = 0;
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;

			if (cda[CTA_LABELS] &&
			    ctnetlink_attach_labels(ct, cda) == 0)
				events |= (1 << IPCT_LABEL);

			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).portid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		}

		return err;
	}
	/* implicit 'else' */

	err = -EEXIST;
	ct = nf_ct_tuplehash_to_ctrack(h);
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		spin_lock_bh(&nf_conntrack_lock);
		err = ctnetlink_change_conntrack(ct, cda);
		spin_unlock_bh(&nf_conntrack_lock);
		if (err == 0) {
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).portid,
						      nlmsg_report(nlh));
		}
	}

	nf_ct_put(ct);
	return err;
}

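/* Export of per-CPU conntrack statistics (IPCTNL_MSG_CT_GET_STATS_CPU). */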
static int
ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
				__u16 cpu, const struct ip_conntrack_stat *st)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int flags = portid ? NLM_F_MULTI : 0, event;

	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = AF_UNSPEC;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(cpu);

	if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
	    nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
	    nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
	    nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
	    nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
	    nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
			 htonl(st->insert_failed)) ||
	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
			 htonl(st->search_restart)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static int
ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int cpu;
	struct net *net = sock_net(skb->sk);

	if (cb->args[0] == nr_cpu_ids)
		return 0;

	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
		const struct ip_conntrack_stat *st;

		if (!cpu_possible(cpu))
			continue;

		st = per_cpu_ptr(net->ct.stat, cpu);
		if (ctnetlink_ct_stat_cpu_fill_info(skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    cpu, st) < 0)
			break;
	}
	cb->args[0] = cpu;

	return skb->len;
}

static int
ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
		      const struct nlmsghdr *nlh,
		      const struct nlattr * const cda[])
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_ct_stat_cpu_dump,
		};
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	return 0;
}

static int
ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
			    struct net *net)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
	unsigned int nr_conntracks = atomic_read(&net->ct.count);

	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = AF_UNSPEC;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static int
ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const cda[])
{
	struct sk_buff *skb2;
	int err;

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL)
		return -ENOMEM;

	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
					  nlh->nlmsg_seq,
					  NFNL_MSG_TYPE(nlh->nlmsg_type),
					  sock_net(skb->sk));
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}

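/* Hooks used by nfnetlink_queue to attach conntrack information to
 * queued packets and to apply conntrack changes requested by userspace.
 */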
#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
static size_t
ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
{
	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}

static int
ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct)) {
		if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
			goto nla_put_failure;
	}

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_timeout(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
		goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
		goto nla_put_failure;
#endif
	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
		goto nla_put_failure;

	if ((ct->status & IPS_SEQ_ADJUST) &&
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	if (ctnetlink_dump_labels(skb, ct) < 0)
		goto nla_put_failure;
	rcu_read_unlock();
	return 0;

nla_put_failure:
	rcu_read_unlock();
	return -ENOSPC;
}

static int
ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
{
	int err;

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}
	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}
	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}
	if (cda[CTA_LABELS]) {
		err = ctnetlink_attach_labels(ct, cda);
		if (err < 0)
			return err;
	}
#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif
	return 0;
}

static int
ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
{
	struct nlattr *cda[CTA_MAX+1];
	int ret;

	nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);

	spin_lock_bh(&nf_conntrack_lock);
	ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
	spin_unlock_bh(&nf_conntrack_lock);

	return ret;
}

static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
	.build_size	= ctnetlink_nfqueue_build_size,
	.build		= ctnetlink_nfqueue_build,
	.parse		= ctnetlink_nfqueue_parse,
};
#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */

/***********************************************************************
 * EXPECT
 ***********************************************************************/

static inline int
ctnetlink_exp_dump_tuple(struct sk_buff *skb,
			 const struct nf_conntrack_tuple *tuple,
			 enum ctattr_expect type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, tuple) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static inline int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
			const struct nf_conntrack_tuple *tuple,
			const struct nf_conntrack_tuple_mask *mask)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple m;
	struct nlattr *nest_parms;

	memset(&m, 0xFF, sizeof(m));
	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
	m.src.u.all = mask->src.u.all;
	m.dst.protonum = tuple->dst.protonum;

	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
	if (ret >= 0) {
		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
					       tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
	}
	rcu_read_unlock();

	if (unlikely(ret < 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

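/* Dump one expectation: tuple, mask, master tuple, optional NAT info,
 * timeout, id, flags, class and helper name.
 */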
2196 static const union nf_inet_addr any_addr;
2198 static int
2199 ctnetlink_exp_dump_expect(struct sk_buff *skb,
2200 const struct nf_conntrack_expect *exp)
2202 struct nf_conn *master = exp->master;
2203 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2204 struct nf_conn_help *help;
2205 #ifdef CONFIG_NF_NAT_NEEDED
2206 struct nlattr *nest_parms;
2207 struct nf_conntrack_tuple nat_tuple = {};
2208 #endif
2209 struct nf_ct_helper_expectfn *expfn;
2211 if (timeout < 0)
2212 timeout = 0;
2214 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2215 goto nla_put_failure;
2216 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2217 goto nla_put_failure;
2218 if (ctnetlink_exp_dump_tuple(skb,
2219 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2220 CTA_EXPECT_MASTER) < 0)
2221 goto nla_put_failure;
2223 #ifdef CONFIG_NF_NAT_NEEDED
2224 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2225 exp->saved_proto.all) {
2226 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2227 if (!nest_parms)
2228 goto nla_put_failure;
2230 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2231 goto nla_put_failure;
2233 nat_tuple.src.l3num = nf_ct_l3num(master);
2234 nat_tuple.src.u3 = exp->saved_addr;
2235 nat_tuple.dst.protonum = nf_ct_protonum(master);
2236 nat_tuple.src.u = exp->saved_proto;
2238 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2239 CTA_EXPECT_NAT_TUPLE) < 0)
2240 goto nla_put_failure;
2241 nla_nest_end(skb, nest_parms);
2243 #endif
2244 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2245 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
2246 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2247 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2248 goto nla_put_failure;
2249 help = nfct_help(master);
2250 if (help) {
2251 struct nf_conntrack_helper *helper;
2253 helper = rcu_dereference(help->helper);
2254 if (helper &&
2255 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2256 goto nla_put_failure;
2258 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2259 if (expfn != NULL &&
2260 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2261 goto nla_put_failure;
2263 return 0;
2265 nla_put_failure:
2266 return -1;
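/* Build a complete IPCTNL_MSG_EXP_* message for one expectation; used by
 * the GET and dump paths. */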
2269 static int
2270 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2271 int event, const struct nf_conntrack_expect *exp)
2273 struct nlmsghdr *nlh;
2274 struct nfgenmsg *nfmsg;
2275 unsigned int flags = portid ? NLM_F_MULTI : 0;
2277 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2278 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2279 if (nlh == NULL)
2280 goto nlmsg_failure;
2282 nfmsg = nlmsg_data(nlh);
2283 nfmsg->nfgen_family = exp->tuple.src.l3num;
2284 nfmsg->version = NFNETLINK_V0;
2285 nfmsg->res_id = 0;
2287 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2288 goto nla_put_failure;
2290 nlmsg_end(skb, nlh);
2291 return skb->len;
2293 nlmsg_failure:
2294 nla_put_failure:
2295 nlmsg_cancel(skb, nlh);
2296 return -1;
2299 #ifdef CONFIG_NF_CONNTRACK_EVENTS
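/* Expectation event callback: build a NEW or DESTROY message and send it
 * to the matching nfnetlink multicast group. */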
2300 static int
2301 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2303 struct nf_conntrack_expect *exp = item->exp;
2304 struct net *net = nf_ct_exp_net(exp);
2305 struct nlmsghdr *nlh;
2306 struct nfgenmsg *nfmsg;
2307 struct sk_buff *skb;
2308 unsigned int type, group;
2309 int flags = 0;
2311 if (events & (1 << IPEXP_DESTROY)) {
2312 type = IPCTNL_MSG_EXP_DELETE;
2313 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2314 } else if (events & (1 << IPEXP_NEW)) {
2315 type = IPCTNL_MSG_EXP_NEW;
2316 flags = NLM_F_CREATE|NLM_F_EXCL;
2317 group = NFNLGRP_CONNTRACK_EXP_NEW;
2318 } else
2319 return 0;
2321 if (!item->report && !nfnetlink_has_listeners(net, group))
2322 return 0;
2324 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2325 if (skb == NULL)
2326 goto errout;
2328 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2329 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2330 if (nlh == NULL)
2331 goto nlmsg_failure;
2333 nfmsg = nlmsg_data(nlh);
2334 nfmsg->nfgen_family = exp->tuple.src.l3num;
2335 nfmsg->version = NFNETLINK_V0;
2336 nfmsg->res_id = 0;
2338 rcu_read_lock();
2339 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2340 goto nla_put_failure;
2341 rcu_read_unlock();
2343 nlmsg_end(skb, nlh);
2344 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2345 return 0;
2347 nla_put_failure:
2348 rcu_read_unlock();
2349 nlmsg_cancel(skb, nlh);
2350 nlmsg_failure:
2351 kfree_skb(skb);
2352 errout:
2353 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2354 return 0;
2356 #endif
2357 static int ctnetlink_exp_done(struct netlink_callback *cb)
2359 if (cb->args[1])
2360 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2361 return 0;
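/* Dump the expectation table. cb->args[0] is the current hash bucket;
 * cb->args[1] keeps a reference to the last expectation that did not fit
 * into the skb so the next call can resume from it. */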
2364 static int
2365 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2367 struct net *net = sock_net(skb->sk);
2368 struct nf_conntrack_expect *exp, *last;
2369 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2370 struct hlist_node *n;
2371 u_int8_t l3proto = nfmsg->nfgen_family;
2373 rcu_read_lock();
2374 last = (struct nf_conntrack_expect *)cb->args[1];
2375 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2376 restart:
2377 hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
2378 hnode) {
2379 if (l3proto && exp->tuple.src.l3num != l3proto)
2380 continue;
2381 if (cb->args[1]) {
2382 if (exp != last)
2383 continue;
2384 cb->args[1] = 0;
2386 if (ctnetlink_exp_fill_info(skb,
2387 NETLINK_CB(cb->skb).portid,
2388 cb->nlh->nlmsg_seq,
2389 IPCTNL_MSG_EXP_NEW,
2390 exp) < 0) {
2391 if (!atomic_inc_not_zero(&exp->use))
2392 continue;
2393 cb->args[1] = (unsigned long)exp;
2394 goto out;
2397 if (cb->args[1]) {
2398 cb->args[1] = 0;
2399 goto restart;
2402 out:
2403 rcu_read_unlock();
2404 if (last)
2405 nf_ct_expect_put(last);
2407 return skb->len;
2410 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2411 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2412 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2413 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2414 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2415 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2416 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2417 .len = NF_CT_HELPER_NAME_LEN - 1 },
2418 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2419 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2420 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2421 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2422 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
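/* IPCTNL_MSG_EXP_GET: start a table dump for NLM_F_DUMP requests,
 * otherwise look up one expectation by tuple or master tuple and unicast
 * it back to the requester. */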
2425 static int
2426 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2427 const struct nlmsghdr *nlh,
2428 const struct nlattr * const cda[])
2430 struct net *net = sock_net(ctnl);
2431 struct nf_conntrack_tuple tuple;
2432 struct nf_conntrack_expect *exp;
2433 struct sk_buff *skb2;
2434 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2435 u_int8_t u3 = nfmsg->nfgen_family;
2436 u16 zone;
2437 int err;
2439 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2440 struct netlink_dump_control c = {
2441 .dump = ctnetlink_exp_dump_table,
2442 .done = ctnetlink_exp_done,
2444 return netlink_dump_start(ctnl, skb, nlh, &c);
2447 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2448 if (err < 0)
2449 return err;
2451 if (cda[CTA_EXPECT_TUPLE])
2452 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2453 else if (cda[CTA_EXPECT_MASTER])
2454 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2455 else
2456 return -EINVAL;
2458 if (err < 0)
2459 return err;
2461 exp = nf_ct_expect_find_get(net, zone, &tuple);
2462 if (!exp)
2463 return -ENOENT;
2465 if (cda[CTA_EXPECT_ID]) {
2466 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2467 if (ntohl(id) != (u32)(unsigned long)exp) {
2468 nf_ct_expect_put(exp);
2469 return -ENOENT;
2473 err = -ENOMEM;
2474 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2475 if (skb2 == NULL) {
2476 nf_ct_expect_put(exp);
2477 goto out;
2480 rcu_read_lock();
2481 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
2482 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2483 rcu_read_unlock();
2484 nf_ct_expect_put(exp);
2485 if (err <= 0)
2486 goto free;
2488 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2489 if (err < 0)
2490 goto out;
2492 return 0;
2494 free:
2495 kfree_skb(skb2);
2496 out:
2497 /* this avoids a loop in nfnetlink. */
2498 return err == -EAGAIN ? -ENOBUFS : err;
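/* IPCTNL_MSG_EXP_DELETE: remove a single expectation identified by
 * CTA_EXPECT_TUPLE (and optionally CTA_EXPECT_ID), all expectations of
 * the helper named in CTA_EXPECT_HELP_NAME, or flush the whole table. */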
2501 static int
2502 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2503 const struct nlmsghdr *nlh,
2504 const struct nlattr * const cda[])
2506 struct net *net = sock_net(ctnl);
2507 struct nf_conntrack_expect *exp;
2508 struct nf_conntrack_tuple tuple;
2509 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2510 struct hlist_node *n, *next;
2511 u_int8_t u3 = nfmsg->nfgen_family;
2512 unsigned int i;
2513 u16 zone;
2514 int err;
2516 if (cda[CTA_EXPECT_TUPLE]) {
2517 /* delete a single expect by tuple */
2518 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2519 if (err < 0)
2520 return err;
2522 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2523 if (err < 0)
2524 return err;
2526 /* bump usage count to 2 */
2527 exp = nf_ct_expect_find_get(net, zone, &tuple);
2528 if (!exp)
2529 return -ENOENT;
2531 if (cda[CTA_EXPECT_ID]) {
2532 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2533 if (ntohl(id) != (u32)(unsigned long)exp) {
2534 nf_ct_expect_put(exp);
2535 return -ENOENT;
2539 /* after list removal, usage count == 1 */
2540 spin_lock_bh(&nf_conntrack_lock);
2541 if (del_timer(&exp->timeout)) {
2542 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
2543 nlmsg_report(nlh));
2544 nf_ct_expect_put(exp);
2546 spin_unlock_bh(&nf_conntrack_lock);
2547 /* have to put the reference we took with nf_ct_expect_find_get() above;
2548 * after this line the usage count is 0 */
2549 nf_ct_expect_put(exp);
2550 } else if (cda[CTA_EXPECT_HELP_NAME]) {
2551 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2552 struct nf_conn_help *m_help;
2554 /* delete all expectations for this helper */
2555 spin_lock_bh(&nf_conntrack_lock);
2556 for (i = 0; i < nf_ct_expect_hsize; i++) {
2557 hlist_for_each_entry_safe(exp, n, next,
2558 &net->ct.expect_hash[i],
2559 hnode) {
2560 m_help = nfct_help(exp->master);
2561 if (!strcmp(m_help->helper->name, name) &&
2562 del_timer(&exp->timeout)) {
2563 nf_ct_unlink_expect_report(exp,
2564 NETLINK_CB(skb).portid,
2565 nlmsg_report(nlh));
2566 nf_ct_expect_put(exp);
2570 spin_unlock_bh(&nf_conntrack_lock);
2571 } else {
2572 /* This basically means we have to flush everything */
2573 spin_lock_bh(&nf_conntrack_lock);
2574 for (i = 0; i < nf_ct_expect_hsize; i++) {
2575 hlist_for_each_entry_safe(exp, n, next,
2576 &net->ct.expect_hash[i],
2577 hnode) {
2578 if (del_timer(&exp->timeout)) {
2579 nf_ct_unlink_expect_report(exp,
2580 NETLINK_CB(skb).portid,
2581 nlmsg_report(nlh));
2582 nf_ct_expect_put(exp);
2586 spin_unlock_bh(&nf_conntrack_lock);
2589 return 0;
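/* Only the timeout of an existing expectation may be changed; the timer
 * is rearmed with the new CTA_EXPECT_TIMEOUT value. */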
2591 static int
2592 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2593 const struct nlattr * const cda[])
2595 if (cda[CTA_EXPECT_TIMEOUT]) {
2596 if (!del_timer(&x->timeout))
2597 return -ETIME;
2599 x->timeout.expires = jiffies +
2600 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2601 add_timer(&x->timeout);
2603 return 0;
2606 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2607 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2608 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
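/* Parse the nested CTA_EXPECT_NAT attribute (direction and tuple) into
 * the expectation's saved address and protocol data; returns -EOPNOTSUPP
 * when NAT support is not compiled in. */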
2611 static int
2612 ctnetlink_parse_expect_nat(const struct nlattr *attr,
2613 struct nf_conntrack_expect *exp,
2614 u_int8_t u3)
2616 #ifdef CONFIG_NF_NAT_NEEDED
2617 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2618 struct nf_conntrack_tuple nat_tuple = {};
2619 int err;
2621 nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2623 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2624 return -EINVAL;
2626 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2627 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2628 if (err < 0)
2629 return err;
2631 exp->saved_addr = nat_tuple.src.u3;
2632 exp->saved_proto = nat_tuple.src.u;
2633 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2635 return 0;
2636 #else
2637 return -EOPNOTSUPP;
2638 #endif
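/* Create an expectation on behalf of userspace: parse the expect, mask
 * and master tuples, find the master conntrack and (optionally) its
 * helper, set class, flags, timeout and NAT data, then register it. */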
2641 static int
2642 ctnetlink_create_expect(struct net *net, u16 zone,
2643 const struct nlattr * const cda[],
2644 u_int8_t u3,
2645 u32 portid, int report)
2647 struct nf_conntrack_tuple tuple, mask, master_tuple;
2648 struct nf_conntrack_tuple_hash *h = NULL;
2649 struct nf_conntrack_expect *exp;
2650 struct nf_conn *ct;
2651 struct nf_conn_help *help;
2652 struct nf_conntrack_helper *helper = NULL;
2653 u_int32_t class = 0;
2654 int err = 0;
2656 /* caller guarantees that those three CTA_EXPECT_* exist */
2657 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2658 if (err < 0)
2659 return err;
2660 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2661 if (err < 0)
2662 return err;
2663 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2664 if (err < 0)
2665 return err;
2667 /* Look for master conntrack of this expectation */
2668 h = nf_conntrack_find_get(net, zone, &master_tuple);
2669 if (!h)
2670 return -ENOENT;
2671 ct = nf_ct_tuplehash_to_ctrack(h);
2673 /* Look for helper of this expectation */
2674 if (cda[CTA_EXPECT_HELP_NAME]) {
2675 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2677 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2678 nf_ct_protonum(ct));
2679 if (helper == NULL) {
2680 #ifdef CONFIG_MODULES
2681 if (request_module("nfct-helper-%s", helpname) < 0) {
2682 err = -EOPNOTSUPP;
2683 goto out;
2686 helper = __nf_conntrack_helper_find(helpname,
2687 nf_ct_l3num(ct),
2688 nf_ct_protonum(ct));
2689 if (helper) {
2690 err = -EAGAIN;
2691 goto out;
2693 #endif
2694 err = -EOPNOTSUPP;
2695 goto out;
2699 if (cda[CTA_EXPECT_CLASS] && helper) {
2700 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2701 if (class > helper->expect_class_max) {
2702 err = -EINVAL;
2703 goto out;
2706 exp = nf_ct_expect_alloc(ct);
2707 if (!exp) {
2708 err = -ENOMEM;
2709 goto out;
2711 help = nfct_help(ct);
2712 if (!help) {
2713 if (!cda[CTA_EXPECT_TIMEOUT]) {
2714 err = -EINVAL;
2715 goto err_out;
2717 exp->timeout.expires =
2718 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2720 exp->flags = NF_CT_EXPECT_USERSPACE;
2721 if (cda[CTA_EXPECT_FLAGS]) {
2722 exp->flags |=
2723 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2725 } else {
2726 if (cda[CTA_EXPECT_FLAGS]) {
2727 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2728 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2729 } else
2730 exp->flags = 0;
2732 if (cda[CTA_EXPECT_FN]) {
2733 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2734 struct nf_ct_helper_expectfn *expfn;
2736 expfn = nf_ct_helper_expectfn_find_by_name(name);
2737 if (expfn == NULL) {
2738 err = -EINVAL;
2739 goto err_out;
2741 exp->expectfn = expfn->expectfn;
2742 } else
2743 exp->expectfn = NULL;
2745 exp->class = class;
2746 exp->master = ct;
2747 exp->helper = helper;
2748 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2749 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2750 exp->mask.src.u.all = mask.src.u.all;
2752 if (cda[CTA_EXPECT_NAT]) {
2753 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2754 exp, u3);
2755 if (err < 0)
2756 goto err_out;
2758 err = nf_ct_expect_related_report(exp, portid, report);
2759 err_out:
2760 nf_ct_expect_put(exp);
2761 out:
2762 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2763 return err;
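/* IPCTNL_MSG_EXP_NEW: create the expectation if it does not exist and
 * NLM_F_CREATE is set, otherwise update it unless NLM_F_EXCL was given. */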
2766 static int
2767 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2768 const struct nlmsghdr *nlh,
2769 const struct nlattr * const cda[])
2771 struct net *net = sock_net(ctnl);
2772 struct nf_conntrack_tuple tuple;
2773 struct nf_conntrack_expect *exp;
2774 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2775 u_int8_t u3 = nfmsg->nfgen_family;
2776 u16 zone;
2777 int err;
2779 if (!cda[CTA_EXPECT_TUPLE]
2780 || !cda[CTA_EXPECT_MASK]
2781 || !cda[CTA_EXPECT_MASTER])
2782 return -EINVAL;
2784 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2785 if (err < 0)
2786 return err;
2788 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2789 if (err < 0)
2790 return err;
2792 spin_lock_bh(&nf_conntrack_lock);
2793 exp = __nf_ct_expect_find(net, zone, &tuple);
2795 if (!exp) {
2796 spin_unlock_bh(&nf_conntrack_lock);
2797 err = -ENOENT;
2798 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2799 err = ctnetlink_create_expect(net, zone, cda,
2801 NETLINK_CB(skb).portid,
2802 nlmsg_report(nlh));
2804 return err;
2807 err = -EEXIST;
2808 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
2809 err = ctnetlink_change_expect(exp, cda);
2810 spin_unlock_bh(&nf_conntrack_lock);
2812 return err;
2815 static int
2816 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
2817 const struct ip_conntrack_stat *st)
2819 struct nlmsghdr *nlh;
2820 struct nfgenmsg *nfmsg;
2821 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2823 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
2824 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2825 if (nlh == NULL)
2826 goto nlmsg_failure;
2828 nfmsg = nlmsg_data(nlh);
2829 nfmsg->nfgen_family = AF_UNSPEC;
2830 nfmsg->version = NFNETLINK_V0;
2831 nfmsg->res_id = htons(cpu);
2833 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
2834 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
2835 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
2836 goto nla_put_failure;
2838 nlmsg_end(skb, nlh);
2839 return skb->len;
2841 nla_put_failure:
2842 nlmsg_failure:
2843 nlmsg_cancel(skb, nlh);
2844 return -1;
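/* Dump the per-CPU expectation statistics (new/create/delete counters),
 * one message per possible CPU, resuming from cb->args[0]. */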
2847 static int
2848 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2850 int cpu;
2851 struct net *net = sock_net(skb->sk);
2853 if (cb->args[0] == nr_cpu_ids)
2854 return 0;
2856 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2857 const struct ip_conntrack_stat *st;
2859 if (!cpu_possible(cpu))
2860 continue;
2862 st = per_cpu_ptr(net->ct.stat, cpu);
2863 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
2864 cb->nlh->nlmsg_seq,
2865 cpu, st) < 0)
2866 break;
2868 cb->args[0] = cpu;
2870 return skb->len;
2873 static int
2874 ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
2875 const struct nlmsghdr *nlh,
2876 const struct nlattr * const cda[])
2878 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2879 struct netlink_dump_control c = {
2880 .dump = ctnetlink_exp_stat_cpu_dump,
2882 return netlink_dump_start(ctnl, skb, nlh, &c);
2885 return 0;
2888 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2889 static struct nf_ct_event_notifier ctnl_notifier = {
2890 .fcn = ctnetlink_conntrack_event,
2893 static struct nf_exp_event_notifier ctnl_notifier_exp = {
2894 .fcn = ctnetlink_expect_event,
2896 #endif
2898 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
2899 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
2900 .attr_count = CTA_MAX,
2901 .policy = ct_nla_policy },
2902 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
2903 .attr_count = CTA_MAX,
2904 .policy = ct_nla_policy },
2905 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
2906 .attr_count = CTA_MAX,
2907 .policy = ct_nla_policy },
2908 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
2909 .attr_count = CTA_MAX,
2910 .policy = ct_nla_policy },
2911 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
2912 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
2913 [IPCTNL_MSG_CT_GET_DYING] = { .call = ctnetlink_get_ct_dying },
2914 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed },
2917 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
2918 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
2919 .attr_count = CTA_EXPECT_MAX,
2920 .policy = exp_nla_policy },
2921 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
2922 .attr_count = CTA_EXPECT_MAX,
2923 .policy = exp_nla_policy },
2924 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
2925 .attr_count = CTA_EXPECT_MAX,
2926 .policy = exp_nla_policy },
2927 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
2930 static const struct nfnetlink_subsystem ctnl_subsys = {
2931 .name = "conntrack",
2932 .subsys_id = NFNL_SUBSYS_CTNETLINK,
2933 .cb_count = IPCTNL_MSG_MAX,
2934 .cb = ctnl_cb,
2937 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
2938 .name = "conntrack_expect",
2939 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
2940 .cb_count = IPCTNL_MSG_EXP_MAX,
2941 .cb = ctnl_exp_cb,
2944 MODULE_ALIAS("ip_conntrack_netlink");
2945 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
2946 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
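/* Per-netns init: register the conntrack and expectation event notifiers
 * when CONFIG_NF_CONNTRACK_EVENTS is enabled. */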
2948 static int __net_init ctnetlink_net_init(struct net *net)
2950 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2951 int ret;
2953 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
2954 if (ret < 0) {
2955 pr_err("ctnetlink_init: cannot register notifier.\n");
2956 goto err_out;
2959 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
2960 if (ret < 0) {
2961 pr_err("ctnetlink_init: cannot register expect notifier.\n");
2962 goto err_unreg_notifier;
2964 #endif
2965 return 0;
2967 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2968 err_unreg_notifier:
2969 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2970 err_out:
2971 return ret;
2972 #endif
2975 static void ctnetlink_net_exit(struct net *net)
2977 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2978 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
2979 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2980 #endif
2983 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2985 struct net *net;
2987 list_for_each_entry(net, net_exit_list, exit_list)
2988 ctnetlink_net_exit(net);
2991 static struct pernet_operations ctnetlink_net_ops = {
2992 .init = ctnetlink_net_init,
2993 .exit_batch = ctnetlink_net_exit_batch,
2996 static int __init ctnetlink_init(void)
2998 int ret;
3000 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
3001 ret = nfnetlink_subsys_register(&ctnl_subsys);
3002 if (ret < 0) {
3003 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3004 goto err_out;
3007 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3008 if (ret < 0) {
3009 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3010 goto err_unreg_subsys;
3013 ret = register_pernet_subsys(&ctnetlink_net_ops);
3014 if (ret < 0) {
3015 pr_err("ctnetlink_init: cannot register pernet operations\n");
3016 goto err_unreg_exp_subsys;
3018 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3019 /* setup interaction between nf_queue and nf_conntrack_netlink. */
3020 RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
3021 #endif
3022 return 0;
3024 err_unreg_exp_subsys:
3025 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3026 err_unreg_subsys:
3027 nfnetlink_subsys_unregister(&ctnl_subsys);
3028 err_out:
3029 return ret;
3032 static void __exit ctnetlink_exit(void)
3034 pr_info("ctnetlink: unregistering from nfnetlink.\n");
3036 unregister_pernet_subsys(&ctnetlink_net_ops);
3037 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3038 nfnetlink_subsys_unregister(&ctnl_subsys);
3039 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3040 RCU_INIT_POINTER(nfq_ct_hook, NULL);
3041 #endif
3044 module_init(ctnetlink_init);
3045 module_exit(ctnetlink_exit);