netfilter: ctnetlink: allow to set helper for new expectations
[linux-2.6.git] / net / netfilter / nf_conntrack_netlink.c
blob 1b0aea620d62f57b7f94dc14feeee64117d29621
1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick McHardy <kaber@trash.net>
7 * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
33 #include <linux/netfilter.h>
34 #include <net/netlink.h>
35 #include <net/sock.h>
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_core.h>
38 #include <net/netfilter/nf_conntrack_expect.h>
39 #include <net/netfilter/nf_conntrack_helper.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_tuple.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_zones.h>
45 #include <net/netfilter/nf_conntrack_timestamp.h>
46 #ifdef CONFIG_NF_NAT_NEEDED
47 #include <net/netfilter/nf_nat_core.h>
48 #include <net/netfilter/nf_nat_protocol.h>
49 #endif
51 #include <linux/netfilter/nfnetlink.h>
52 #include <linux/netfilter/nfnetlink_conntrack.h>
54 MODULE_LICENSE("GPL");
56 static char __initdata version[] = "0.93";
58 static inline int
59 ctnetlink_dump_tuples_proto(struct sk_buff *skb,
60 const struct nf_conntrack_tuple *tuple,
61 struct nf_conntrack_l4proto *l4proto)
63 int ret = 0;
64 struct nlattr *nest_parms;
66 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
67 if (!nest_parms)
68 goto nla_put_failure;
69 NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);
71 if (likely(l4proto->tuple_to_nlattr))
72 ret = l4proto->tuple_to_nlattr(skb, tuple);
74 nla_nest_end(skb, nest_parms);
76 return ret;
78 nla_put_failure:
79 return -1;
82 static inline int
83 ctnetlink_dump_tuples_ip(struct sk_buff *skb,
84 const struct nf_conntrack_tuple *tuple,
85 struct nf_conntrack_l3proto *l3proto)
87 int ret = 0;
88 struct nlattr *nest_parms;
90 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
91 if (!nest_parms)
92 goto nla_put_failure;
94 if (likely(l3proto->tuple_to_nlattr))
95 ret = l3proto->tuple_to_nlattr(skb, tuple);
97 nla_nest_end(skb, nest_parms);
99 return ret;
101 nla_put_failure:
102 return -1;
105 static int
106 ctnetlink_dump_tuples(struct sk_buff *skb,
107 const struct nf_conntrack_tuple *tuple)
109 int ret;
110 struct nf_conntrack_l3proto *l3proto;
111 struct nf_conntrack_l4proto *l4proto;
113 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
114 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
116 if (unlikely(ret < 0))
117 return ret;
119 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
120 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
122 return ret;
125 static inline int
126 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
128 NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
129 return 0;
131 nla_put_failure:
132 return -1;
135 static inline int
136 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
138 long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
140 if (timeout < 0)
141 timeout = 0;
143 NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
144 return 0;
146 nla_put_failure:
147 return -1;
150 static inline int
151 ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
153 struct nf_conntrack_l4proto *l4proto;
154 struct nlattr *nest_proto;
155 int ret;
157 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
158 if (!l4proto->to_nlattr)
159 return 0;
161 nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
162 if (!nest_proto)
163 goto nla_put_failure;
165 ret = l4proto->to_nlattr(skb, nest_proto, ct);
167 nla_nest_end(skb, nest_proto);
169 return ret;
171 nla_put_failure:
172 return -1;
175 static inline int
176 ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
178 struct nlattr *nest_helper;
179 const struct nf_conn_help *help = nfct_help(ct);
180 struct nf_conntrack_helper *helper;
182 if (!help)
183 return 0;
185 helper = rcu_dereference(help->helper);
186 if (!helper)
187 goto out;
189 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
190 if (!nest_helper)
191 goto nla_put_failure;
192 NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);
194 if (helper->to_nlattr)
195 helper->to_nlattr(skb, ct);
197 nla_nest_end(skb, nest_helper);
198 out:
199 return 0;
201 nla_put_failure:
202 return -1;
205 static int
206 dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
207 enum ip_conntrack_dir dir)
209 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
210 struct nlattr *nest_count;
212 nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
213 if (!nest_count)
214 goto nla_put_failure;
216 NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
217 NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
219 nla_nest_end(skb, nest_count);
221 return 0;
223 nla_put_failure:
224 return -1;
227 static int
228 ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
229 enum ip_conntrack_dir dir, int type)
231 struct nf_conn_counter *acct;
232 u64 pkts, bytes;
234 acct = nf_conn_acct_find(ct);
235 if (!acct)
236 return 0;
238 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
239 pkts = atomic64_xchg(&acct[dir].packets, 0);
240 bytes = atomic64_xchg(&acct[dir].bytes, 0);
241 } else {
242 pkts = atomic64_read(&acct[dir].packets);
243 bytes = atomic64_read(&acct[dir].bytes);
245 return dump_counters(skb, pkts, bytes, dir);
248 static int
249 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
251 struct nlattr *nest_count;
252 const struct nf_conn_tstamp *tstamp;
254 tstamp = nf_conn_tstamp_find(ct);
255 if (!tstamp)
256 return 0;
258 nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
259 if (!nest_count)
260 goto nla_put_failure;
262 NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
263 if (tstamp->stop != 0) {
264 NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
265 cpu_to_be64(tstamp->stop));
267 nla_nest_end(skb, nest_count);
269 return 0;
271 nla_put_failure:
272 return -1;
275 #ifdef CONFIG_NF_CONNTRACK_MARK
276 static inline int
277 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
279 NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
280 return 0;
282 nla_put_failure:
283 return -1;
285 #else
286 #define ctnetlink_dump_mark(a, b) (0)
287 #endif
289 #ifdef CONFIG_NF_CONNTRACK_SECMARK
290 static inline int
291 ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
293 struct nlattr *nest_secctx;
294 int len, ret;
295 char *secctx;
297 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
298 if (ret)
299 return 0;
301 ret = -1;
302 nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
303 if (!nest_secctx)
304 goto nla_put_failure;
306 NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
307 nla_nest_end(skb, nest_secctx);
309 ret = 0;
310 nla_put_failure:
311 security_release_secctx(secctx, len);
312 return ret;
314 #else
315 #define ctnetlink_dump_secctx(a, b) (0)
316 #endif
318 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
320 static inline int
321 ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
323 struct nlattr *nest_parms;
325 if (!(ct->status & IPS_EXPECTED))
326 return 0;
328 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
329 if (!nest_parms)
330 goto nla_put_failure;
331 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
332 goto nla_put_failure;
333 nla_nest_end(skb, nest_parms);
335 return 0;
337 nla_put_failure:
338 return -1;
341 #ifdef CONFIG_NF_NAT_NEEDED
342 static int
343 dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
345 struct nlattr *nest_parms;
347 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
348 if (!nest_parms)
349 goto nla_put_failure;
351 NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
352 htonl(natseq->correction_pos));
353 NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
354 htonl(natseq->offset_before));
355 NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
356 htonl(natseq->offset_after));
358 nla_nest_end(skb, nest_parms);
360 return 0;
362 nla_put_failure:
363 return -1;
366 static inline int
367 ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
369 struct nf_nat_seq *natseq;
370 struct nf_conn_nat *nat = nfct_nat(ct);
372 if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
373 return 0;
375 natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
376 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
377 return -1;
379 natseq = &nat->seq[IP_CT_DIR_REPLY];
380 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
381 return -1;
383 return 0;
385 #else
386 #define ctnetlink_dump_nat_seq_adj(a, b) (0)
387 #endif
389 static inline int
390 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
392 NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
393 return 0;
395 nla_put_failure:
396 return -1;
399 static inline int
400 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
402 NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
403 return 0;
405 nla_put_failure:
406 return -1;
409 static int
410 ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
411 struct nf_conn *ct)
413 struct nlmsghdr *nlh;
414 struct nfgenmsg *nfmsg;
415 struct nlattr *nest_parms;
416 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
418 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
419 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
420 if (nlh == NULL)
421 goto nlmsg_failure;
423 nfmsg = nlmsg_data(nlh);
424 nfmsg->nfgen_family = nf_ct_l3num(ct);
425 nfmsg->version = NFNETLINK_V0;
426 nfmsg->res_id = 0;
428 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
429 if (!nest_parms)
430 goto nla_put_failure;
431 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
432 goto nla_put_failure;
433 nla_nest_end(skb, nest_parms);
435 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
436 if (!nest_parms)
437 goto nla_put_failure;
438 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
439 goto nla_put_failure;
440 nla_nest_end(skb, nest_parms);
442 if (nf_ct_zone(ct))
443 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
445 if (ctnetlink_dump_status(skb, ct) < 0 ||
446 ctnetlink_dump_timeout(skb, ct) < 0 ||
447 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
448 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
449 ctnetlink_dump_timestamp(skb, ct) < 0 ||
450 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
451 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
452 ctnetlink_dump_mark(skb, ct) < 0 ||
453 ctnetlink_dump_secctx(skb, ct) < 0 ||
454 ctnetlink_dump_id(skb, ct) < 0 ||
455 ctnetlink_dump_use(skb, ct) < 0 ||
456 ctnetlink_dump_master(skb, ct) < 0 ||
457 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
458 goto nla_put_failure;
460 nlmsg_end(skb, nlh);
461 return skb->len;
463 nlmsg_failure:
464 nla_put_failure:
465 nlmsg_cancel(skb, nlh);
466 return -1;
469 #ifdef CONFIG_NF_CONNTRACK_EVENTS
470 static inline size_t
471 ctnetlink_proto_size(const struct nf_conn *ct)
473 struct nf_conntrack_l3proto *l3proto;
474 struct nf_conntrack_l4proto *l4proto;
475 size_t len = 0;
477 rcu_read_lock();
478 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
479 len += l3proto->nla_size;
481 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
482 len += l4proto->nla_size;
483 rcu_read_unlock();
485 return len;
488 static inline size_t
489 ctnetlink_counters_size(const struct nf_conn *ct)
491 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
492 return 0;
493 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
494 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
495 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
499 static inline int
500 ctnetlink_secctx_size(const struct nf_conn *ct)
502 #ifdef CONFIG_NF_CONNTRACK_SECMARK
503 int len, ret;
505 ret = security_secid_to_secctx(ct->secmark, NULL, &len);
506 if (ret)
507 return 0;
509 return nla_total_size(0) /* CTA_SECCTX */
510 + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
511 #else
512 return 0;
513 #endif
516 static inline size_t
517 ctnetlink_timestamp_size(const struct nf_conn *ct)
519 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
520 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
521 return 0;
522 return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
523 #else
524 return 0;
525 #endif
528 static inline size_t
529 ctnetlink_nlmsg_size(const struct nf_conn *ct)
531 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
532 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
533 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
534 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
535 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
536 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
537 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
538 + ctnetlink_counters_size(ct)
539 + ctnetlink_timestamp_size(ct)
540 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
541 + nla_total_size(0) /* CTA_PROTOINFO */
542 + nla_total_size(0) /* CTA_HELP */
543 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
544 + ctnetlink_secctx_size(ct)
545 #ifdef CONFIG_NF_NAT_NEEDED
546 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
547 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
548 #endif
549 #ifdef CONFIG_NF_CONNTRACK_MARK
550 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
551 #endif
552 + ctnetlink_proto_size(ct)
556 static int
557 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
559 struct net *net;
560 struct nlmsghdr *nlh;
561 struct nfgenmsg *nfmsg;
562 struct nlattr *nest_parms;
563 struct nf_conn *ct = item->ct;
564 struct sk_buff *skb;
565 unsigned int type;
566 unsigned int flags = 0, group;
567 int err;
569 /* ignore our fake conntrack entry */
570 if (nf_ct_is_untracked(ct))
571 return 0;
573 if (events & (1 << IPCT_DESTROY)) {
574 type = IPCTNL_MSG_CT_DELETE;
575 group = NFNLGRP_CONNTRACK_DESTROY;
576 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
577 type = IPCTNL_MSG_CT_NEW;
578 flags = NLM_F_CREATE|NLM_F_EXCL;
579 group = NFNLGRP_CONNTRACK_NEW;
580 } else if (events) {
581 type = IPCTNL_MSG_CT_NEW;
582 group = NFNLGRP_CONNTRACK_UPDATE;
583 } else
584 return 0;
586 net = nf_ct_net(ct);
587 if (!item->report && !nfnetlink_has_listeners(net, group))
588 return 0;
590 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
591 if (skb == NULL)
592 goto errout;
594 type |= NFNL_SUBSYS_CTNETLINK << 8;
595 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
596 if (nlh == NULL)
597 goto nlmsg_failure;
599 nfmsg = nlmsg_data(nlh);
600 nfmsg->nfgen_family = nf_ct_l3num(ct);
601 nfmsg->version = NFNETLINK_V0;
602 nfmsg->res_id = 0;
604 rcu_read_lock();
605 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
606 if (!nest_parms)
607 goto nla_put_failure;
608 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
609 goto nla_put_failure;
610 nla_nest_end(skb, nest_parms);
612 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
613 if (!nest_parms)
614 goto nla_put_failure;
615 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
616 goto nla_put_failure;
617 nla_nest_end(skb, nest_parms);
619 if (nf_ct_zone(ct))
620 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
622 if (ctnetlink_dump_id(skb, ct) < 0)
623 goto nla_put_failure;
625 if (ctnetlink_dump_status(skb, ct) < 0)
626 goto nla_put_failure;
628 if (events & (1 << IPCT_DESTROY)) {
629 if (ctnetlink_dump_counters(skb, ct,
630 IP_CT_DIR_ORIGINAL, type) < 0 ||
631 ctnetlink_dump_counters(skb, ct,
632 IP_CT_DIR_REPLY, type) < 0 ||
633 ctnetlink_dump_timestamp(skb, ct) < 0)
634 goto nla_put_failure;
635 } else {
636 if (ctnetlink_dump_timeout(skb, ct) < 0)
637 goto nla_put_failure;
639 if (events & (1 << IPCT_PROTOINFO)
640 && ctnetlink_dump_protoinfo(skb, ct) < 0)
641 goto nla_put_failure;
643 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
644 && ctnetlink_dump_helpinfo(skb, ct) < 0)
645 goto nla_put_failure;
647 #ifdef CONFIG_NF_CONNTRACK_SECMARK
648 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
649 && ctnetlink_dump_secctx(skb, ct) < 0)
650 goto nla_put_failure;
651 #endif
653 if (events & (1 << IPCT_RELATED) &&
654 ctnetlink_dump_master(skb, ct) < 0)
655 goto nla_put_failure;
657 if (events & (1 << IPCT_NATSEQADJ) &&
658 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
659 goto nla_put_failure;
662 #ifdef CONFIG_NF_CONNTRACK_MARK
663 if ((events & (1 << IPCT_MARK) || ct->mark)
664 && ctnetlink_dump_mark(skb, ct) < 0)
665 goto nla_put_failure;
666 #endif
667 rcu_read_unlock();
669 nlmsg_end(skb, nlh);
670 err = nfnetlink_send(skb, net, item->pid, group, item->report,
671 GFP_ATOMIC);
672 if (err == -ENOBUFS || err == -EAGAIN)
673 return -ENOBUFS;
675 return 0;
677 nla_put_failure:
678 rcu_read_unlock();
679 nlmsg_cancel(skb, nlh);
680 nlmsg_failure:
681 kfree_skb(skb);
682 errout:
683 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
684 return -ENOBUFS;
686 return 0;
688 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
690 static int ctnetlink_done(struct netlink_callback *cb)
692 if (cb->args[1])
693 nf_ct_put((struct nf_conn *)cb->args[1]);
694 if (cb->data)
695 kfree(cb->data);
696 return 0;
699 struct ctnetlink_dump_filter {
700 struct {
701 u_int32_t val;
702 u_int32_t mask;
703 } mark;
706 static int
707 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
709 struct net *net = sock_net(skb->sk);
710 struct nf_conn *ct, *last;
711 struct nf_conntrack_tuple_hash *h;
712 struct hlist_nulls_node *n;
713 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
714 u_int8_t l3proto = nfmsg->nfgen_family;
715 #ifdef CONFIG_NF_CONNTRACK_MARK
716 const struct ctnetlink_dump_filter *filter = cb->data;
717 #endif
718 spin_lock_bh(&nf_conntrack_lock);
719 last = (struct nf_conn *)cb->args[1];
720 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
721 restart:
722 hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
723 hnnode) {
724 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
725 continue;
726 ct = nf_ct_tuplehash_to_ctrack(h);
727 /* Dump entries of a given L3 protocol number.
728 * If it is not specified, i.e. l3proto == 0,
729 * then dump everything. */
730 if (l3proto && nf_ct_l3num(ct) != l3proto)
731 continue;
732 if (cb->args[1]) {
733 if (ct != last)
734 continue;
735 cb->args[1] = 0;
737 #ifdef CONFIG_NF_CONNTRACK_MARK
738 if (filter && !((ct->mark & filter->mark.mask) ==
739 filter->mark.val)) {
740 continue;
742 #endif
743 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
744 cb->nlh->nlmsg_seq,
745 NFNL_MSG_TYPE(
746 cb->nlh->nlmsg_type),
747 ct) < 0) {
748 nf_conntrack_get(&ct->ct_general);
749 cb->args[1] = (unsigned long)ct;
750 goto out;
753 if (cb->args[1]) {
754 cb->args[1] = 0;
755 goto restart;
758 out:
759 spin_unlock_bh(&nf_conntrack_lock);
760 if (last)
761 nf_ct_put(last);
763 return skb->len;
766 static inline int
767 ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
769 struct nlattr *tb[CTA_IP_MAX+1];
770 struct nf_conntrack_l3proto *l3proto;
771 int ret = 0;
773 nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
775 rcu_read_lock();
776 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
778 if (likely(l3proto->nlattr_to_tuple)) {
779 ret = nla_validate_nested(attr, CTA_IP_MAX,
780 l3proto->nla_policy);
781 if (ret == 0)
782 ret = l3proto->nlattr_to_tuple(tb, tuple);
785 rcu_read_unlock();
787 return ret;
790 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
791 [CTA_PROTO_NUM] = { .type = NLA_U8 },
794 static inline int
795 ctnetlink_parse_tuple_proto(struct nlattr *attr,
796 struct nf_conntrack_tuple *tuple)
798 struct nlattr *tb[CTA_PROTO_MAX+1];
799 struct nf_conntrack_l4proto *l4proto;
800 int ret = 0;
802 ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
803 if (ret < 0)
804 return ret;
806 if (!tb[CTA_PROTO_NUM])
807 return -EINVAL;
808 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
810 rcu_read_lock();
811 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
813 if (likely(l4proto->nlattr_to_tuple)) {
814 ret = nla_validate_nested(attr, CTA_PROTO_MAX,
815 l4proto->nla_policy);
816 if (ret == 0)
817 ret = l4proto->nlattr_to_tuple(tb, tuple);
820 rcu_read_unlock();
822 return ret;
825 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
826 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
827 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
830 static int
831 ctnetlink_parse_tuple(const struct nlattr * const cda[],
832 struct nf_conntrack_tuple *tuple,
833 enum ctattr_type type, u_int8_t l3num)
835 struct nlattr *tb[CTA_TUPLE_MAX+1];
836 int err;
838 memset(tuple, 0, sizeof(*tuple));
840 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
842 if (!tb[CTA_TUPLE_IP])
843 return -EINVAL;
845 tuple->src.l3num = l3num;
847 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
848 if (err < 0)
849 return err;
851 if (!tb[CTA_TUPLE_PROTO])
852 return -EINVAL;
854 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
855 if (err < 0)
856 return err;
858 /* orig and expect tuples get DIR_ORIGINAL */
859 if (type == CTA_TUPLE_REPLY)
860 tuple->dst.dir = IP_CT_DIR_REPLY;
861 else
862 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
864 return 0;
867 static int
868 ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
870 if (attr)
871 #ifdef CONFIG_NF_CONNTRACK_ZONES
872 *zone = ntohs(nla_get_be16(attr));
873 #else
874 return -EOPNOTSUPP;
875 #endif
876 else
877 *zone = 0;
879 return 0;
882 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
883 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING },
886 static inline int
887 ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
889 struct nlattr *tb[CTA_HELP_MAX+1];
891 nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
893 if (!tb[CTA_HELP_NAME])
894 return -EINVAL;
896 *helper_name = nla_data(tb[CTA_HELP_NAME]);
898 return 0;
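/*
 * Illustration (editorial note, not part of the original file): on the
 * wire, the helper request parsed above is a nested attribute of the form
 *
 *	CTA_HELP (nested)
 *	    CTA_HELP_NAME	"ftp\0"
 *
 * where the helper name is only an example.  ctnetlink_parse_help()
 * extracts the name string; an empty name ("") is interpreted later by
 * ctnetlink_change_helper() as a request to detach the current helper.
 */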
901 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
902 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
903 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
904 [CTA_STATUS] = { .type = NLA_U32 },
905 [CTA_PROTOINFO] = { .type = NLA_NESTED },
906 [CTA_HELP] = { .type = NLA_NESTED },
907 [CTA_NAT_SRC] = { .type = NLA_NESTED },
908 [CTA_TIMEOUT] = { .type = NLA_U32 },
909 [CTA_MARK] = { .type = NLA_U32 },
910 [CTA_ID] = { .type = NLA_U32 },
911 [CTA_NAT_DST] = { .type = NLA_NESTED },
912 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
913 [CTA_ZONE] = { .type = NLA_U16 },
914 [CTA_MARK_MASK] = { .type = NLA_U32 },
917 static int
918 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
919 const struct nlmsghdr *nlh,
920 const struct nlattr * const cda[])
922 struct net *net = sock_net(ctnl);
923 struct nf_conntrack_tuple_hash *h;
924 struct nf_conntrack_tuple tuple;
925 struct nf_conn *ct;
926 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
927 u_int8_t u3 = nfmsg->nfgen_family;
928 u16 zone;
929 int err;
931 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
932 if (err < 0)
933 return err;
935 if (cda[CTA_TUPLE_ORIG])
936 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
937 else if (cda[CTA_TUPLE_REPLY])
938 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
939 else {
940 /* Flush the whole table */
941 nf_conntrack_flush_report(net,
942 NETLINK_CB(skb).pid,
943 nlmsg_report(nlh));
944 return 0;
947 if (err < 0)
948 return err;
950 h = nf_conntrack_find_get(net, zone, &tuple);
951 if (!h)
952 return -ENOENT;
954 ct = nf_ct_tuplehash_to_ctrack(h);
956 if (cda[CTA_ID]) {
957 u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
958 if (id != (u32)(unsigned long)ct) {
959 nf_ct_put(ct);
960 return -ENOENT;
964 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
965 NETLINK_CB(skb).pid,
966 nlmsg_report(nlh)) < 0) {
967 nf_ct_delete_from_lists(ct);
968 /* we failed to report the event, try later */
969 nf_ct_insert_dying_list(ct);
970 nf_ct_put(ct);
971 return 0;
974 /* death_by_timeout would report the event again */
975 set_bit(IPS_DYING_BIT, &ct->status);
977 nf_ct_kill(ct);
978 nf_ct_put(ct);
980 return 0;
983 static int
984 ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
985 const struct nlmsghdr *nlh,
986 const struct nlattr * const cda[])
988 struct net *net = sock_net(ctnl);
989 struct nf_conntrack_tuple_hash *h;
990 struct nf_conntrack_tuple tuple;
991 struct nf_conn *ct;
992 struct sk_buff *skb2 = NULL;
993 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
994 u_int8_t u3 = nfmsg->nfgen_family;
995 u16 zone;
996 int err;
998 if (nlh->nlmsg_flags & NLM_F_DUMP) {
999 struct netlink_dump_control c = {
1000 .dump = ctnetlink_dump_table,
1001 .done = ctnetlink_done,
1003 #ifdef CONFIG_NF_CONNTRACK_MARK
1004 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1005 struct ctnetlink_dump_filter *filter;
1007 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1008 GFP_ATOMIC);
1009 if (filter == NULL)
1010 return -ENOMEM;
1012 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1013 filter->mark.mask =
1014 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1015 c.data = filter;
1017 #endif
1018 return netlink_dump_start(ctnl, skb, nlh, &c);
1021 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1022 if (err < 0)
1023 return err;
1025 if (cda[CTA_TUPLE_ORIG])
1026 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1027 else if (cda[CTA_TUPLE_REPLY])
1028 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1029 else
1030 return -EINVAL;
1032 if (err < 0)
1033 return err;
1035 h = nf_conntrack_find_get(net, zone, &tuple);
1036 if (!h)
1037 return -ENOENT;
1039 ct = nf_ct_tuplehash_to_ctrack(h);
1041 err = -ENOMEM;
1042 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1043 if (skb2 == NULL) {
1044 nf_ct_put(ct);
1045 return -ENOMEM;
1048 rcu_read_lock();
1049 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
1050 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1051 rcu_read_unlock();
1052 nf_ct_put(ct);
1053 if (err <= 0)
1054 goto free;
1056 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1057 if (err < 0)
1058 goto out;
1060 return 0;
1062 free:
1063 kfree_skb(skb2);
1064 out:
1065 /* this avoids a loop in nfnetlink. */
1066 return err == -EAGAIN ? -ENOBUFS : err;
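/*
 * Example (editorial sketch, not part of the original file): how a
 * userspace program could use the NLM_F_DUMP branch above to request a
 * dump filtered by mark.  It assumes libmnl and an already-open
 * NETLINK_NETFILTER socket 'nl'; the mark value/mask pair (0x1/0xff) is
 * illustrative, and error handling plus the receive loop are omitted.
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nfgenmsg *nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
 *
 *	nlh->nlmsg_type	 = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *	nfg->nfgen_family = AF_INET;
 *	nfg->version	  = NFNETLINK_V0;
 *	nfg->res_id	  = 0;
 *
 *	mnl_attr_put_u32(nlh, CTA_MARK, htonl(0x1));
 *	mnl_attr_put_u32(nlh, CTA_MARK_MASK, htonl(0xff));
 *	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
 */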
1069 #ifdef CONFIG_NF_NAT_NEEDED
1070 static int
1071 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1072 enum nf_nat_manip_type manip,
1073 const struct nlattr *attr)
1075 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1077 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1078 if (!parse_nat_setup) {
1079 #ifdef CONFIG_MODULES
1080 rcu_read_unlock();
1081 spin_unlock_bh(&nf_conntrack_lock);
1082 nfnl_unlock();
1083 if (request_module("nf-nat-ipv4") < 0) {
1084 nfnl_lock();
1085 spin_lock_bh(&nf_conntrack_lock);
1086 rcu_read_lock();
1087 return -EOPNOTSUPP;
1089 nfnl_lock();
1090 spin_lock_bh(&nf_conntrack_lock);
1091 rcu_read_lock();
1092 if (nfnetlink_parse_nat_setup_hook)
1093 return -EAGAIN;
1094 #endif
1095 return -EOPNOTSUPP;
1098 return parse_nat_setup(ct, manip, attr);
1100 #endif
1102 static int
1103 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1105 unsigned long d;
1106 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1107 d = ct->status ^ status;
1109 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1110 /* unchangeable */
1111 return -EBUSY;
1113 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1114 /* SEEN_REPLY bit can only be set */
1115 return -EBUSY;
1117 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1118 /* ASSURED bit can only be set */
1119 return -EBUSY;
1121 /* Be careful here, modifying NAT bits can screw up things,
1122 * so don't let users modify them directly if they don't pass
1123 * nf_nat_range. */
1124 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1125 return 0;
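/*
 * Worked example (editorial note, not in the original): if ct->status
 * already has IPS_SEEN_REPLY set and userspace sends a CTA_STATUS value
 * without it, then d = ct->status ^ status contains IPS_SEEN_REPLY while
 * the new status does not, so -EBUSY is returned.  In other words,
 * SEEN_REPLY and ASSURED may only ever be turned on through ctnetlink,
 * and EXPECTED/CONFIRMED/DYING cannot be changed at all.
 */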
1128 static int
1129 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1131 #ifdef CONFIG_NF_NAT_NEEDED
1132 int ret;
1134 if (cda[CTA_NAT_DST]) {
1135 ret = ctnetlink_parse_nat_setup(ct,
1136 NF_NAT_MANIP_DST,
1137 cda[CTA_NAT_DST]);
1138 if (ret < 0)
1139 return ret;
1141 if (cda[CTA_NAT_SRC]) {
1142 ret = ctnetlink_parse_nat_setup(ct,
1143 NF_NAT_MANIP_SRC,
1144 cda[CTA_NAT_SRC]);
1145 if (ret < 0)
1146 return ret;
1148 return 0;
1149 #else
1150 return -EOPNOTSUPP;
1151 #endif
1154 static inline int
1155 ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1157 struct nf_conntrack_helper *helper;
1158 struct nf_conn_help *help = nfct_help(ct);
1159 char *helpname = NULL;
1160 int err;
1162 /* don't change helper of sibling connections */
1163 if (ct->master)
1164 return -EBUSY;
1166 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
1167 if (err < 0)
1168 return err;
1170 if (!strcmp(helpname, "")) {
1171 if (help && help->helper) {
1172 /* we had a helper before ... */
1173 nf_ct_remove_expectations(ct);
1174 RCU_INIT_POINTER(help->helper, NULL);
1177 return 0;
1180 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1181 nf_ct_protonum(ct));
1182 if (helper == NULL) {
1183 #ifdef CONFIG_MODULES
1184 spin_unlock_bh(&nf_conntrack_lock);
1186 if (request_module("nfct-helper-%s", helpname) < 0) {
1187 spin_lock_bh(&nf_conntrack_lock);
1188 return -EOPNOTSUPP;
1191 spin_lock_bh(&nf_conntrack_lock);
1192 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1193 nf_ct_protonum(ct));
1194 if (helper)
1195 return -EAGAIN;
1196 #endif
1197 return -EOPNOTSUPP;
1200 if (help) {
1201 if (help->helper == helper)
1202 return 0;
1203 if (help->helper)
1204 return -EBUSY;
1205 /* need to zero data of old helper */
1206 memset(&help->help, 0, sizeof(help->help));
1207 } else {
1208 /* we cannot set a helper for an existing conntrack */
1209 return -EOPNOTSUPP;
1212 rcu_assign_pointer(help->helper, helper);
1214 return 0;
1217 static inline int
1218 ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1220 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1222 if (!del_timer(&ct->timeout))
1223 return -ETIME;
1225 ct->timeout.expires = jiffies + timeout * HZ;
1226 add_timer(&ct->timeout);
1228 return 0;
1231 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1232 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1233 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1234 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1237 static inline int
1238 ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1240 const struct nlattr *attr = cda[CTA_PROTOINFO];
1241 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1242 struct nf_conntrack_l4proto *l4proto;
1243 int err = 0;
1245 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1247 rcu_read_lock();
1248 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1249 if (l4proto->from_nlattr)
1250 err = l4proto->from_nlattr(tb, ct);
1251 rcu_read_unlock();
1253 return err;
1256 #ifdef CONFIG_NF_NAT_NEEDED
1257 static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1258 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1259 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1260 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1263 static inline int
1264 change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1266 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1268 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1270 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1271 return -EINVAL;
1273 natseq->correction_pos =
1274 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
1276 if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
1277 return -EINVAL;
1279 natseq->offset_before =
1280 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
1282 if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
1283 return -EINVAL;
1285 natseq->offset_after =
1286 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
1288 return 0;
1291 static int
1292 ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
1293 const struct nlattr * const cda[])
1295 int ret = 0;
1296 struct nf_conn_nat *nat = nfct_nat(ct);
1298 if (!nat)
1299 return 0;
1301 if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
1302 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
1303 cda[CTA_NAT_SEQ_ADJ_ORIG]);
1304 if (ret < 0)
1305 return ret;
1307 ct->status |= IPS_SEQ_ADJUST;
1310 if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1311 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
1312 cda[CTA_NAT_SEQ_ADJ_REPLY]);
1313 if (ret < 0)
1314 return ret;
1316 ct->status |= IPS_SEQ_ADJUST;
1319 return 0;
1321 #endif
1323 static int
1324 ctnetlink_change_conntrack(struct nf_conn *ct,
1325 const struct nlattr * const cda[])
1327 int err;
1329 /* only allow NAT changes and master assignment for new conntracks */
1330 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1331 return -EOPNOTSUPP;
1333 if (cda[CTA_HELP]) {
1334 err = ctnetlink_change_helper(ct, cda);
1335 if (err < 0)
1336 return err;
1339 if (cda[CTA_TIMEOUT]) {
1340 err = ctnetlink_change_timeout(ct, cda);
1341 if (err < 0)
1342 return err;
1345 if (cda[CTA_STATUS]) {
1346 err = ctnetlink_change_status(ct, cda);
1347 if (err < 0)
1348 return err;
1351 if (cda[CTA_PROTOINFO]) {
1352 err = ctnetlink_change_protoinfo(ct, cda);
1353 if (err < 0)
1354 return err;
1357 #if defined(CONFIG_NF_CONNTRACK_MARK)
1358 if (cda[CTA_MARK])
1359 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1360 #endif
1362 #ifdef CONFIG_NF_NAT_NEEDED
1363 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1364 err = ctnetlink_change_nat_seq_adj(ct, cda);
1365 if (err < 0)
1366 return err;
1368 #endif
1370 return 0;
1373 static struct nf_conn *
1374 ctnetlink_create_conntrack(struct net *net, u16 zone,
1375 const struct nlattr * const cda[],
1376 struct nf_conntrack_tuple *otuple,
1377 struct nf_conntrack_tuple *rtuple,
1378 u8 u3)
1380 struct nf_conn *ct;
1381 int err = -EINVAL;
1382 struct nf_conntrack_helper *helper;
1383 struct nf_conn_tstamp *tstamp;
1385 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1386 if (IS_ERR(ct))
1387 return ERR_PTR(-ENOMEM);
1389 if (!cda[CTA_TIMEOUT])
1390 goto err1;
1391 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1393 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1395 rcu_read_lock();
1396 if (cda[CTA_HELP]) {
1397 char *helpname = NULL;
1399 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
1400 if (err < 0)
1401 goto err2;
1403 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1404 nf_ct_protonum(ct));
1405 if (helper == NULL) {
1406 rcu_read_unlock();
1407 #ifdef CONFIG_MODULES
1408 if (request_module("nfct-helper-%s", helpname) < 0) {
1409 err = -EOPNOTSUPP;
1410 goto err1;
1413 rcu_read_lock();
1414 helper = __nf_conntrack_helper_find(helpname,
1415 nf_ct_l3num(ct),
1416 nf_ct_protonum(ct));
1417 if (helper) {
1418 err = -EAGAIN;
1419 goto err2;
1421 rcu_read_unlock();
1422 #endif
1423 err = -EOPNOTSUPP;
1424 goto err1;
1425 } else {
1426 struct nf_conn_help *help;
1428 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
1429 if (help == NULL) {
1430 err = -ENOMEM;
1431 goto err2;
1434 /* not in hash table yet so not strictly necessary */
1435 RCU_INIT_POINTER(help->helper, helper);
1437 } else {
1438 /* try an implicit helper assignment */
1439 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1440 if (err < 0)
1441 goto err2;
1444 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1445 err = ctnetlink_change_nat(ct, cda);
1446 if (err < 0)
1447 goto err2;
1450 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1451 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1452 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1453 /* we must add conntrack extensions before confirmation. */
1454 ct->status |= IPS_CONFIRMED;
1456 if (cda[CTA_STATUS]) {
1457 err = ctnetlink_change_status(ct, cda);
1458 if (err < 0)
1459 goto err2;
1462 #ifdef CONFIG_NF_NAT_NEEDED
1463 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1464 err = ctnetlink_change_nat_seq_adj(ct, cda);
1465 if (err < 0)
1466 goto err2;
1468 #endif
1470 memset(&ct->proto, 0, sizeof(ct->proto));
1471 if (cda[CTA_PROTOINFO]) {
1472 err = ctnetlink_change_protoinfo(ct, cda);
1473 if (err < 0)
1474 goto err2;
1477 #if defined(CONFIG_NF_CONNTRACK_MARK)
1478 if (cda[CTA_MARK])
1479 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1480 #endif
1482 /* setup master conntrack: this is a confirmed expectation */
1483 if (cda[CTA_TUPLE_MASTER]) {
1484 struct nf_conntrack_tuple master;
1485 struct nf_conntrack_tuple_hash *master_h;
1486 struct nf_conn *master_ct;
1488 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1489 if (err < 0)
1490 goto err2;
1492 master_h = nf_conntrack_find_get(net, zone, &master);
1493 if (master_h == NULL) {
1494 err = -ENOENT;
1495 goto err2;
1497 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1498 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1499 ct->master = master_ct;
1501 tstamp = nf_conn_tstamp_find(ct);
1502 if (tstamp)
1503 tstamp->start = ktime_to_ns(ktime_get_real());
1505 err = nf_conntrack_hash_check_insert(ct);
1506 if (err < 0)
1507 goto err2;
1509 rcu_read_unlock();
1511 return ct;
1513 err2:
1514 rcu_read_unlock();
1515 err1:
1516 nf_conntrack_free(ct);
1517 return ERR_PTR(err);
1520 static int
1521 ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1522 const struct nlmsghdr *nlh,
1523 const struct nlattr * const cda[])
1525 struct net *net = sock_net(ctnl);
1526 struct nf_conntrack_tuple otuple, rtuple;
1527 struct nf_conntrack_tuple_hash *h = NULL;
1528 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1529 struct nf_conn *ct;
1530 u_int8_t u3 = nfmsg->nfgen_family;
1531 u16 zone;
1532 int err;
1534 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1535 if (err < 0)
1536 return err;
1538 if (cda[CTA_TUPLE_ORIG]) {
1539 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
1540 if (err < 0)
1541 return err;
1544 if (cda[CTA_TUPLE_REPLY]) {
1545 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
1546 if (err < 0)
1547 return err;
1550 if (cda[CTA_TUPLE_ORIG])
1551 h = nf_conntrack_find_get(net, zone, &otuple);
1552 else if (cda[CTA_TUPLE_REPLY])
1553 h = nf_conntrack_find_get(net, zone, &rtuple);
1555 if (h == NULL) {
1556 err = -ENOENT;
1557 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1558 enum ip_conntrack_events events;
1560 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1561 &rtuple, u3);
1562 if (IS_ERR(ct))
1563 return PTR_ERR(ct);
1565 err = 0;
1566 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1567 events = IPCT_RELATED;
1568 else
1569 events = IPCT_NEW;
1571 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1572 (1 << IPCT_ASSURED) |
1573 (1 << IPCT_HELPER) |
1574 (1 << IPCT_PROTOINFO) |
1575 (1 << IPCT_NATSEQADJ) |
1576 (1 << IPCT_MARK) | events,
1577 ct, NETLINK_CB(skb).pid,
1578 nlmsg_report(nlh));
1579 nf_ct_put(ct);
1582 return err;
1584 /* implicit 'else' */
1586 err = -EEXIST;
1587 ct = nf_ct_tuplehash_to_ctrack(h);
1588 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1589 spin_lock_bh(&nf_conntrack_lock);
1590 err = ctnetlink_change_conntrack(ct, cda);
1591 spin_unlock_bh(&nf_conntrack_lock);
1592 if (err == 0) {
1593 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1594 (1 << IPCT_ASSURED) |
1595 (1 << IPCT_HELPER) |
1596 (1 << IPCT_PROTOINFO) |
1597 (1 << IPCT_NATSEQADJ) |
1598 (1 << IPCT_MARK),
1599 ct, NETLINK_CB(skb).pid,
1600 nlmsg_report(nlh));
1604 nf_ct_put(ct);
1605 return err;
1608 /***********************************************************************
1609 * EXPECT
1610 ***********************************************************************/
1612 static inline int
1613 ctnetlink_exp_dump_tuple(struct sk_buff *skb,
1614 const struct nf_conntrack_tuple *tuple,
1615 enum ctattr_expect type)
1617 struct nlattr *nest_parms;
1619 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
1620 if (!nest_parms)
1621 goto nla_put_failure;
1622 if (ctnetlink_dump_tuples(skb, tuple) < 0)
1623 goto nla_put_failure;
1624 nla_nest_end(skb, nest_parms);
1626 return 0;
1628 nla_put_failure:
1629 return -1;
1632 static inline int
1633 ctnetlink_exp_dump_mask(struct sk_buff *skb,
1634 const struct nf_conntrack_tuple *tuple,
1635 const struct nf_conntrack_tuple_mask *mask)
1637 int ret;
1638 struct nf_conntrack_l3proto *l3proto;
1639 struct nf_conntrack_l4proto *l4proto;
1640 struct nf_conntrack_tuple m;
1641 struct nlattr *nest_parms;
1643 memset(&m, 0xFF, sizeof(m));
1644 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
1645 m.src.u.all = mask->src.u.all;
1646 m.dst.protonum = tuple->dst.protonum;
1648 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
1649 if (!nest_parms)
1650 goto nla_put_failure;
1652 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
1653 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
1655 if (unlikely(ret < 0))
1656 goto nla_put_failure;
1658 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
1659 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
1660 if (unlikely(ret < 0))
1661 goto nla_put_failure;
1663 nla_nest_end(skb, nest_parms);
1665 return 0;
1667 nla_put_failure:
1668 return -1;
1671 static int
1672 ctnetlink_exp_dump_expect(struct sk_buff *skb,
1673 const struct nf_conntrack_expect *exp)
1675 struct nf_conn *master = exp->master;
1676 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
1677 struct nf_conn_help *help;
1679 if (timeout < 0)
1680 timeout = 0;
1682 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
1683 goto nla_put_failure;
1684 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
1685 goto nla_put_failure;
1686 if (ctnetlink_exp_dump_tuple(skb,
1687 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1688 CTA_EXPECT_MASTER) < 0)
1689 goto nla_put_failure;
1691 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
1692 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
1693 NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
1694 help = nfct_help(master);
1695 if (help) {
1696 struct nf_conntrack_helper *helper;
1698 helper = rcu_dereference(help->helper);
1699 if (helper)
1700 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
1703 return 0;
1705 nla_put_failure:
1706 return -1;
1709 static int
1710 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1711 int event, const struct nf_conntrack_expect *exp)
1713 struct nlmsghdr *nlh;
1714 struct nfgenmsg *nfmsg;
1715 unsigned int flags = pid ? NLM_F_MULTI : 0;
1717 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1718 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1719 if (nlh == NULL)
1720 goto nlmsg_failure;
1722 nfmsg = nlmsg_data(nlh);
1723 nfmsg->nfgen_family = exp->tuple.src.l3num;
1724 nfmsg->version = NFNETLINK_V0;
1725 nfmsg->res_id = 0;
1727 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
1728 goto nla_put_failure;
1730 nlmsg_end(skb, nlh);
1731 return skb->len;
1733 nlmsg_failure:
1734 nla_put_failure:
1735 nlmsg_cancel(skb, nlh);
1736 return -1;
1739 #ifdef CONFIG_NF_CONNTRACK_EVENTS
1740 static int
1741 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1743 struct nf_conntrack_expect *exp = item->exp;
1744 struct net *net = nf_ct_exp_net(exp);
1745 struct nlmsghdr *nlh;
1746 struct nfgenmsg *nfmsg;
1747 struct sk_buff *skb;
1748 unsigned int type, group;
1749 int flags = 0;
1751 if (events & (1 << IPEXP_DESTROY)) {
1752 type = IPCTNL_MSG_EXP_DELETE;
1753 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
1754 } else if (events & (1 << IPEXP_NEW)) {
1755 type = IPCTNL_MSG_EXP_NEW;
1756 flags = NLM_F_CREATE|NLM_F_EXCL;
1757 group = NFNLGRP_CONNTRACK_EXP_NEW;
1758 } else
1759 return 0;
1761 if (!item->report && !nfnetlink_has_listeners(net, group))
1762 return 0;
1764 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1765 if (skb == NULL)
1766 goto errout;
1768 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1769 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
1770 if (nlh == NULL)
1771 goto nlmsg_failure;
1773 nfmsg = nlmsg_data(nlh);
1774 nfmsg->nfgen_family = exp->tuple.src.l3num;
1775 nfmsg->version = NFNETLINK_V0;
1776 nfmsg->res_id = 0;
1778 rcu_read_lock();
1779 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
1780 goto nla_put_failure;
1781 rcu_read_unlock();
1783 nlmsg_end(skb, nlh);
1784 nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
1785 return 0;
1787 nla_put_failure:
1788 rcu_read_unlock();
1789 nlmsg_cancel(skb, nlh);
1790 nlmsg_failure:
1791 kfree_skb(skb);
1792 errout:
1793 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
1794 return 0;
1796 #endif
1797 static int ctnetlink_exp_done(struct netlink_callback *cb)
1799 if (cb->args[1])
1800 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
1801 return 0;
1804 static int
1805 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1807 struct net *net = sock_net(skb->sk);
1808 struct nf_conntrack_expect *exp, *last;
1809 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1810 struct hlist_node *n;
1811 u_int8_t l3proto = nfmsg->nfgen_family;
1813 rcu_read_lock();
1814 last = (struct nf_conntrack_expect *)cb->args[1];
1815 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
1816 restart:
1817 hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
1818 hnode) {
1819 if (l3proto && exp->tuple.src.l3num != l3proto)
1820 continue;
1821 if (cb->args[1]) {
1822 if (exp != last)
1823 continue;
1824 cb->args[1] = 0;
1826 if (ctnetlink_exp_fill_info(skb,
1827 NETLINK_CB(cb->skb).pid,
1828 cb->nlh->nlmsg_seq,
1829 IPCTNL_MSG_EXP_NEW,
1830 exp) < 0) {
1831 if (!atomic_inc_not_zero(&exp->use))
1832 continue;
1833 cb->args[1] = (unsigned long)exp;
1834 goto out;
1837 if (cb->args[1]) {
1838 cb->args[1] = 0;
1839 goto restart;
1842 out:
1843 rcu_read_unlock();
1844 if (last)
1845 nf_ct_expect_put(last);
1847 return skb->len;
1850 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1851 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
1852 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
1853 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
1854 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
1855 [CTA_EXPECT_ID] = { .type = NLA_U32 },
1856 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
1857 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
1858 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
1861 static int
1862 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1863 const struct nlmsghdr *nlh,
1864 const struct nlattr * const cda[])
1866 struct net *net = sock_net(ctnl);
1867 struct nf_conntrack_tuple tuple;
1868 struct nf_conntrack_expect *exp;
1869 struct sk_buff *skb2;
1870 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1871 u_int8_t u3 = nfmsg->nfgen_family;
1872 u16 zone;
1873 int err;
1875 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1876 struct netlink_dump_control c = {
1877 .dump = ctnetlink_exp_dump_table,
1878 .done = ctnetlink_exp_done,
1880 return netlink_dump_start(ctnl, skb, nlh, &c);
1883 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1884 if (err < 0)
1885 return err;
1887 if (cda[CTA_EXPECT_TUPLE])
1888 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1889 else if (cda[CTA_EXPECT_MASTER])
1890 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
1891 else
1892 return -EINVAL;
1894 if (err < 0)
1895 return err;
1897 exp = nf_ct_expect_find_get(net, zone, &tuple);
1898 if (!exp)
1899 return -ENOENT;
1901 if (cda[CTA_EXPECT_ID]) {
1902 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
1903 if (ntohl(id) != (u32)(unsigned long)exp) {
1904 nf_ct_expect_put(exp);
1905 return -ENOENT;
1909 err = -ENOMEM;
1910 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1911 if (skb2 == NULL) {
1912 nf_ct_expect_put(exp);
1913 goto out;
1916 rcu_read_lock();
1917 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
1918 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
1919 rcu_read_unlock();
1920 nf_ct_expect_put(exp);
1921 if (err <= 0)
1922 goto free;
1924 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1925 if (err < 0)
1926 goto out;
1928 return 0;
1930 free:
1931 kfree_skb(skb2);
1932 out:
1933 /* this avoids a loop in nfnetlink. */
1934 return err == -EAGAIN ? -ENOBUFS : err;
1937 static int
1938 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1939 const struct nlmsghdr *nlh,
1940 const struct nlattr * const cda[])
1942 struct net *net = sock_net(ctnl);
1943 struct nf_conntrack_expect *exp;
1944 struct nf_conntrack_tuple tuple;
1945 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1946 struct hlist_node *n, *next;
1947 u_int8_t u3 = nfmsg->nfgen_family;
1948 unsigned int i;
1949 u16 zone;
1950 int err;
1952 if (cda[CTA_EXPECT_TUPLE]) {
1953 /* delete a single expect by tuple */
1954 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1955 if (err < 0)
1956 return err;
1958 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1959 if (err < 0)
1960 return err;
1962 /* bump usage count to 2 */
1963 exp = nf_ct_expect_find_get(net, zone, &tuple);
1964 if (!exp)
1965 return -ENOENT;
1967 if (cda[CTA_EXPECT_ID]) {
1968 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
1969 if (ntohl(id) != (u32)(unsigned long)exp) {
1970 nf_ct_expect_put(exp);
1971 return -ENOENT;
1975 /* after list removal, usage count == 1 */
1976 spin_lock_bh(&nf_conntrack_lock);
1977 if (del_timer(&exp->timeout)) {
1978 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid,
1979 nlmsg_report(nlh));
1980 nf_ct_expect_put(exp);
1982 spin_unlock_bh(&nf_conntrack_lock);
1983 /* have to put what we 'get' above.
1984 * after this line usage count == 0 */
1985 nf_ct_expect_put(exp);
1986 } else if (cda[CTA_EXPECT_HELP_NAME]) {
1987 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
1988 struct nf_conn_help *m_help;
1990 /* delete all expectations for this helper */
1991 spin_lock_bh(&nf_conntrack_lock);
1992 for (i = 0; i < nf_ct_expect_hsize; i++) {
1993 hlist_for_each_entry_safe(exp, n, next,
1994 &net->ct.expect_hash[i],
1995 hnode) {
1996 m_help = nfct_help(exp->master);
1997 if (!strcmp(m_help->helper->name, name) &&
1998 del_timer(&exp->timeout)) {
1999 nf_ct_unlink_expect_report(exp,
2000 NETLINK_CB(skb).pid,
2001 nlmsg_report(nlh));
2002 nf_ct_expect_put(exp);
2006 spin_unlock_bh(&nf_conntrack_lock);
2007 } else {
2008 /* This basically means we have to flush everything */
2009 spin_lock_bh(&nf_conntrack_lock);
2010 for (i = 0; i < nf_ct_expect_hsize; i++) {
2011 hlist_for_each_entry_safe(exp, n, next,
2012 &net->ct.expect_hash[i],
2013 hnode) {
2014 if (del_timer(&exp->timeout)) {
2015 nf_ct_unlink_expect_report(exp,
2016 NETLINK_CB(skb).pid,
2017 nlmsg_report(nlh));
2018 nf_ct_expect_put(exp);
2022 spin_unlock_bh(&nf_conntrack_lock);
2025 return 0;
2027 static int
2028 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2029 const struct nlattr * const cda[])
2031 return -EOPNOTSUPP;
2034 static int
2035 ctnetlink_create_expect(struct net *net, u16 zone,
2036 const struct nlattr * const cda[],
2037 u_int8_t u3,
2038 u32 pid, int report)
2040 struct nf_conntrack_tuple tuple, mask, master_tuple;
2041 struct nf_conntrack_tuple_hash *h = NULL;
2042 struct nf_conntrack_expect *exp;
2043 struct nf_conn *ct;
2044 struct nf_conn_help *help;
2045 struct nf_conntrack_helper *helper = NULL;
2046 int err = 0;
2048 /* caller guarantees that those three CTA_EXPECT_* exist */
2049 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2050 if (err < 0)
2051 return err;
2052 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2053 if (err < 0)
2054 return err;
2055 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2056 if (err < 0)
2057 return err;
2059 /* Look for master conntrack of this expectation */
2060 h = nf_conntrack_find_get(net, zone, &master_tuple);
2061 if (!h)
2062 return -ENOENT;
2063 ct = nf_ct_tuplehash_to_ctrack(h);
2065 /* Look for helper of this expectation */
2066 if (cda[CTA_EXPECT_HELP_NAME]) {
2067 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2069 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2070 nf_ct_protonum(ct));
2071 if (helper == NULL) {
2072 #ifdef CONFIG_MODULES
2073 if (request_module("nfct-helper-%s", helpname) < 0) {
2074 err = -EOPNOTSUPP;
2075 goto out;
2078 helper = __nf_conntrack_helper_find(helpname,
2079 nf_ct_l3num(ct),
2080 nf_ct_protonum(ct));
2081 if (helper) {
2082 err = -EAGAIN;
2083 goto out;
2085 #endif
2086 err = -EOPNOTSUPP;
2087 goto out;
2091 exp = nf_ct_expect_alloc(ct);
2092 if (!exp) {
2093 err = -ENOMEM;
2094 goto out;
2096 help = nfct_help(ct);
2097 if (!help) {
2098 if (!cda[CTA_EXPECT_TIMEOUT]) {
2099 err = -EINVAL;
2100 goto out;
2102 exp->timeout.expires =
2103 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2105 exp->flags = NF_CT_EXPECT_USERSPACE;
2106 if (cda[CTA_EXPECT_FLAGS]) {
2107 exp->flags |=
2108 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2110 } else {
2111 if (cda[CTA_EXPECT_FLAGS]) {
2112 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2113 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2114 } else
2115 exp->flags = 0;
2118 exp->class = 0;
2119 exp->expectfn = NULL;
2120 exp->master = ct;
2121 exp->helper = helper;
2122 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2123 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2124 exp->mask.src.u.all = mask.src.u.all;
2126 err = nf_ct_expect_related_report(exp, pid, report);
2127 nf_ct_expect_put(exp);
2129 out:
2130 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2131 return err;
2134 static int
2135 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2136 const struct nlmsghdr *nlh,
2137 const struct nlattr * const cda[])
2139 struct net *net = sock_net(ctnl);
2140 struct nf_conntrack_tuple tuple;
2141 struct nf_conntrack_expect *exp;
2142 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2143 u_int8_t u3 = nfmsg->nfgen_family;
2144 u16 zone;
2145 int err;
2147 if (!cda[CTA_EXPECT_TUPLE]
2148 || !cda[CTA_EXPECT_MASK]
2149 || !cda[CTA_EXPECT_MASTER])
2150 return -EINVAL;
2152 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2153 if (err < 0)
2154 return err;
2156 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2157 if (err < 0)
2158 return err;
2160 spin_lock_bh(&nf_conntrack_lock);
2161 exp = __nf_ct_expect_find(net, zone, &tuple);
2163 if (!exp) {
2164 spin_unlock_bh(&nf_conntrack_lock);
2165 err = -ENOENT;
2166 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2167 err = ctnetlink_create_expect(net, zone, cda,
2169 NETLINK_CB(skb).pid,
2170 nlmsg_report(nlh));
2172 return err;
2175 err = -EEXIST;
2176 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
2177 err = ctnetlink_change_expect(exp, cda);
2178 spin_unlock_bh(&nf_conntrack_lock);
2180 return err;
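/*
 * Example (editorial sketch, not part of the original file): creating an
 * expectation from userspace and attaching a helper to it, which is the
 * feature this patch adds via the CTA_EXPECT_HELP_NAME handling in
 * ctnetlink_create_expect().  It assumes libmnl, the usual <arpa/inet.h>
 * and linux/netfilter headers, an already-open NETLINK_NETFILTER socket
 * 'nl', and IPv4/TCP; addresses, ports and the "ftp" helper name are
 * illustrative only.  Error handling and the ACK receive are omitted.
 *
 *	static void put_tuple(struct nlmsghdr *nlh, uint16_t type,
 *			      in_addr_t src, in_addr_t dst,
 *			      uint16_t sport, uint16_t dport)
 *	{
 *		struct nlattr *t = mnl_attr_nest_start(nlh, type);
 *		struct nlattr *n = mnl_attr_nest_start(nlh, CTA_TUPLE_IP);
 *
 *		mnl_attr_put_u32(nlh, CTA_IP_V4_SRC, src);
 *		mnl_attr_put_u32(nlh, CTA_IP_V4_DST, dst);
 *		mnl_attr_nest_end(nlh, n);
 *
 *		n = mnl_attr_nest_start(nlh, CTA_TUPLE_PROTO);
 *		mnl_attr_put_u8(nlh, CTA_PROTO_NUM, IPPROTO_TCP);
 *		mnl_attr_put_u16(nlh, CTA_PROTO_SRC_PORT, htons(sport));
 *		mnl_attr_put_u16(nlh, CTA_PROTO_DST_PORT, htons(dport));
 *		mnl_attr_nest_end(nlh, n);
 *		mnl_attr_nest_end(nlh, t);
 *	}
 *
 * and then, in the function that builds the request:
 *
 *	char buf[MNL_SOCKET_BUFFER_SIZE];
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nfgenmsg *nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
 *
 *	nlh->nlmsg_type	 = (NFNL_SUBSYS_CTNETLINK_EXP << 8) | IPCTNL_MSG_EXP_NEW;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
 *	nfg->nfgen_family = AF_INET;
 *	nfg->version	  = NFNETLINK_V0;
 *	nfg->res_id	  = 0;
 *
 *	put_tuple(nlh, CTA_EXPECT_MASTER, client, server, cport, 21);
 *	put_tuple(nlh, CTA_EXPECT_TUPLE,  server, client, 20, 0);
 *	put_tuple(nlh, CTA_EXPECT_MASK,   ~0U, ~0U, 0xffff, 0);
 *	mnl_attr_put_u32(nlh, CTA_EXPECT_TIMEOUT, htonl(300));
 *	mnl_attr_put_strz(nlh, CTA_EXPECT_HELP_NAME, "ftp");
 *	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
 */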
2183 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2184 static struct nf_ct_event_notifier ctnl_notifier = {
2185 .fcn = ctnetlink_conntrack_event,
2188 static struct nf_exp_event_notifier ctnl_notifier_exp = {
2189 .fcn = ctnetlink_expect_event,
2191 #endif
2193 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
2194 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
2195 .attr_count = CTA_MAX,
2196 .policy = ct_nla_policy },
2197 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
2198 .attr_count = CTA_MAX,
2199 .policy = ct_nla_policy },
2200 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
2201 .attr_count = CTA_MAX,
2202 .policy = ct_nla_policy },
2203 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
2204 .attr_count = CTA_MAX,
2205 .policy = ct_nla_policy },
2208 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
2209 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
2210 .attr_count = CTA_EXPECT_MAX,
2211 .policy = exp_nla_policy },
2212 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
2213 .attr_count = CTA_EXPECT_MAX,
2214 .policy = exp_nla_policy },
2215 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
2216 .attr_count = CTA_EXPECT_MAX,
2217 .policy = exp_nla_policy },
2220 static const struct nfnetlink_subsystem ctnl_subsys = {
2221 .name = "conntrack",
2222 .subsys_id = NFNL_SUBSYS_CTNETLINK,
2223 .cb_count = IPCTNL_MSG_MAX,
2224 .cb = ctnl_cb,
2227 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
2228 .name = "conntrack_expect",
2229 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
2230 .cb_count = IPCTNL_MSG_EXP_MAX,
2231 .cb = ctnl_exp_cb,
2234 MODULE_ALIAS("ip_conntrack_netlink");
2235 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
2236 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
2238 static int __net_init ctnetlink_net_init(struct net *net)
2240 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2241 int ret;
2243 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
2244 if (ret < 0) {
2245 pr_err("ctnetlink_init: cannot register notifier.\n");
2246 goto err_out;
2249 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
2250 if (ret < 0) {
2251 pr_err("ctnetlink_init: cannot expect register notifier.\n");
2252 goto err_unreg_notifier;
2254 #endif
2255 return 0;
2257 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2258 err_unreg_notifier:
2259 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2260 err_out:
2261 return ret;
2262 #endif
2265 static void ctnetlink_net_exit(struct net *net)
2267 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2268 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
2269 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2270 #endif
2273 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2275 struct net *net;
2277 list_for_each_entry(net, net_exit_list, exit_list)
2278 ctnetlink_net_exit(net);
2281 static struct pernet_operations ctnetlink_net_ops = {
2282 .init = ctnetlink_net_init,
2283 .exit_batch = ctnetlink_net_exit_batch,
2286 static int __init ctnetlink_init(void)
2288 int ret;
2290 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
2291 ret = nfnetlink_subsys_register(&ctnl_subsys);
2292 if (ret < 0) {
2293 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
2294 goto err_out;
2297 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
2298 if (ret < 0) {
2299 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
2300 goto err_unreg_subsys;
2303 if (register_pernet_subsys(&ctnetlink_net_ops)) {
2304 pr_err("ctnetlink_init: cannot register pernet operations\n");
2305 goto err_unreg_exp_subsys;
2308 return 0;
2310 err_unreg_exp_subsys:
2311 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2312 err_unreg_subsys:
2313 nfnetlink_subsys_unregister(&ctnl_subsys);
2314 err_out:
2315 return ret;
2318 static void __exit ctnetlink_exit(void)
2320 pr_info("ctnetlink: unregistering from nfnetlink.\n");
2322 unregister_pernet_subsys(&ctnetlink_net_ops);
2323 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2324 nfnetlink_subsys_unregister(&ctnl_subsys);
2327 module_init(ctnetlink_init);
2328 module_exit(ctnetlink_exit);